Merge branch 'akpm' (patches from Andrew)
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Feb 2015 00:45:56 +0000 (16:45 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Feb 2015 00:45:56 +0000 (16:45 -0800)
Merge misc updates from Andrew Morton:
 "Bite-sized chunks this time, to avoid the MTA ratelimiting woes.

   - fs/notify updates

   - ocfs2

   - some of MM"

That laconic "some MM" is mainly the removal of remap_file_pages(),
which is a big simplification of the VM, and which gets rid of a *lot*
of random cruft and special cases because we no longer support the
non-linear mappings that it used.

From a user interface perspective, nothing has changed, because the
remap_file_pages() syscall still exists, it's just done by emulating the
old behavior by creating a lot of individual small mappings instead of
one non-linear one.

The emulation is slower than the old "native" non-linear mappings, but
nobody really uses or cares about remap_file_pages(), and simplifying
the VM is a big advantage.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (78 commits)
  memcg: zap memcg_slab_caches and memcg_slab_mutex
  memcg: zap memcg_name argument of memcg_create_kmem_cache
  memcg: zap __memcg_{charge,uncharge}_slab
  mm/page_alloc.c: place zone_id check before VM_BUG_ON_PAGE check
  mm: hugetlb: fix type of hugetlb_treat_as_movable variable
  mm, hugetlb: remove unnecessary lower bound on sysctl handlers"?
  mm: memory: merge shared-writable dirtying branches in do_wp_page()
  mm: memory: remove ->vm_file check on shared writable vmas
  xtensa: drop _PAGE_FILE and pte_file()-related helpers
  x86: drop _PAGE_FILE and pte_file()-related helpers
  unicore32: drop pte_file()-related helpers
  um: drop _PAGE_FILE and pte_file()-related helpers
  tile: drop pte_file()-related helpers
  sparc: drop pte_file()-related helpers
  sh: drop _PAGE_FILE and pte_file()-related helpers
  score: drop _PAGE_FILE and pte_file()-related helpers
  s390: drop pte_file()-related helpers
  parisc: drop _PAGE_FILE and pte_file()-related helpers
  openrisc: drop _PAGE_FILE and pte_file()-related helpers
  nios2: drop _PAGE_FILE and pte_file()-related helpers
  ...

495 files changed:
Documentation/acpi/enumeration.txt
Documentation/cpu-freq/intel-pstate.txt
Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/versatile.txt [new file with mode: 0644]
Documentation/filesystems/xfs.txt
Documentation/kernel-parameters.txt
Documentation/power/s2ram.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/boot/dts/versatile-pb.dts
arch/arm/include/asm/mach/pci.h
arch/arm/include/asm/pci.h
arch/arm/include/asm/xen/page.h
arch/arm/kernel/bios32.c
arch/arm/mach-cns3xxx/pcie.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-ks8695/pci.c
arch/arm/mach-sa1100/pci-nanoengine.c
arch/arm/xen/enlighten.c
arch/arm/xen/mm.c
arch/arm/xen/p2m.c
arch/arm64/kernel/pci.c
arch/frv/mb93090-mb00/pci-vdk.c
arch/ia64/kernel/acpi-ext.c
arch/ia64/kernel/acpi.c
arch/ia64/pci/pci.c
arch/m68k/atari/atakeyb.c
arch/m68k/atari/stdma.c
arch/m68k/atari/time.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/atariints.h
arch/m68k/include/asm/futex.h [deleted file]
arch/m68k/include/asm/macintosh.h
arch/m68k/mac/config.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/rtc.c
arch/microblaze/boot/Makefile
arch/microblaze/boot/dts/Makefile
arch/microblaze/include/asm/delay.h
arch/microblaze/include/asm/kgdb.h
arch/microblaze/include/asm/linkage.h
arch/microblaze/include/asm/pgalloc.h
arch/microblaze/include/asm/syscall.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/cpu/cache.c
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
arch/microblaze/kernel/cpu/cpuinfo-static.c
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/kgdb.c
arch/microblaze/kernel/prom_parse.c [deleted file]
arch/microblaze/kernel/ptrace.c
arch/microblaze/kernel/reset.c
arch/microblaze/kernel/signal.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/unwind.c
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mn10300/unit-asb2305/pci.c
arch/powerpc/platforms/cell/celleb_scc_pciex.c
arch/powerpc/platforms/powermac/pci.c
arch/powerpc/sysdev/fsl_pci.c
arch/tile/kernel/pci.c
arch/x86/Kconfig
arch/x86/include/asm/pci_x86.h
arch/x86/include/asm/xen/page.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/pci/acpi.c
arch/x86/pci/bus_numa.c
arch/x86/pci/common.c
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/irq.c
arch/x86/pci/mmconfig-shared.c
arch/x86/pci/xen.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/time.c
arch/x86/xen/xen-ops.h
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/acpi_apd.c [new file with mode: 0644]
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_memhotplug.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpica/acapps.h
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acopcode.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsmthdat.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evhandler.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exoparg6.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstoren.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/hwpci.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsalloc.c
drivers/acpi/acpica/nsarguments.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsprepkg.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/nsxfobj.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psopinfo.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/pswalk.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rsaddr.c
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rsdumpinfo.c
drivers/acpi/acpica/rsinfo.c
drivers/acpi/acpica/rsio.c
drivers/acpi/acpica/rsirq.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmemory.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsserial.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/uterror.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utexcep.c
drivers/acpi/acpica/utfileio.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/uthex.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utownerid.c
drivers/acpi/acpica/utpredef.c
drivers/acpi/acpica/utprint.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/acpica/utstate.c
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utuuid.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/acpica/utxfmutex.c
drivers/acpi/device_pm.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/ioapic.c [new file with mode: 0644]
drivers/acpi/numa.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_root.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/acpi/video.c
drivers/base/power/common.c
drivers/base/power/domain.c
drivers/base/power/opp.c
drivers/base/power/qos.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/char/hpet.c
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/ls1x-cpufreq.c
drivers/cpufreq/sfi-cpufreq.c [new file with mode: 0644]
drivers/cpuidle/cpuidle-big_little.c
drivers/devfreq/Kconfig
drivers/devfreq/Makefile
drivers/devfreq/devfreq-event.c [new file with mode: 0644]
drivers/devfreq/event/Kconfig [new file with mode: 0644]
drivers/devfreq/event/Makefile [new file with mode: 0644]
drivers/devfreq/event/exynos-ppmu.c [new file with mode: 0644]
drivers/devfreq/event/exynos-ppmu.h [new file with mode: 0644]
drivers/devfreq/tegra-devfreq.c [new file with mode: 0644]
drivers/dma/acpi-dma.c
drivers/hv/vmbus_drv.c
drivers/mailbox/pcc.c
drivers/net/ethernet/amd/atarilance.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/of/of_pci.c
drivers/parport/parport_atari.c
drivers/pci/access.c
drivers/pci/bus.c
drivers/pci/host-bridge.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-host-generic.c
drivers/pci/host/pci-keystone.c
drivers/pci/host/pci-layerscape.c
drivers/pci/host/pci-mvebu.c
drivers/pci/host/pci-rcar-gen2.c
drivers/pci/host/pci-tegra.c
drivers/pci/host/pci-versatile.c [new file with mode: 0644]
drivers/pci/host/pci-xgene.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-rcar.c
drivers/pci/host/pcie-xilinx.c
drivers/pci/hotplug/cpci_hotplug_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/sgi_hotplug.c
drivers/pci/msi.c
drivers/pci/pci-acpi.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aspm.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/rom.c
drivers/pnp/pnpacpi/rsparser.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/scsi/esas2r/esas2r_init.c
drivers/sfi/sfi_core.c
drivers/usb/core/hub.c
drivers/video/fbdev/atafb.c
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/tmem.c
drivers/xen/xen-acpi-memhotplug.c
drivers/xen/xen-scsiback.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/cifs/file.c
fs/ext3/super.c
fs/ext4/super.c
fs/gfs2/acl.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/inode.c
fs/gfs2/recovery.c
fs/gfs2/sys.c
fs/inode.c
fs/isofs/util.c
fs/lockd/svcsubs.c
fs/locks.c
fs/nfs/delegation.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/write.c
fs/nfsd/nfs4state.c
fs/ocfs2/quota.h
fs/ocfs2/quota_local.c
fs/ocfs2/super.c
fs/quota/dquot.c
fs/quota/quota.c
fs/quota/quota_v1.c
fs/quota/quota_v2.c
fs/read_write.c
fs/udf/Kconfig
fs/udf/inode.c
fs/udf/super.c
fs/xfs/kmem.c
fs/xfs/kmem.h
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_fs.h [moved from fs/xfs/xfs_fs.h with 100% similarity]
fs/xfs/libxfs/xfs_sb.c
fs/xfs/libxfs/xfs_sb.h
fs/xfs/libxfs/xfs_shared.h
fs/xfs/libxfs/xfs_symlink_remote.c
fs/xfs/libxfs/xfs_trans_resv.c
fs/xfs/libxfs/xfs_trans_resv.h
fs/xfs/libxfs/xfs_types.h [moved from fs/xfs/xfs_types.h with 100% similarity]
fs/xfs/xfs_aops.c
fs/xfs/xfs_aops.h
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_dquot.h
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_ioctl32.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_log.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sysctl.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans_buf.c
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acenvex.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/acpi/platform/aclinuxex.h
include/linux/acpi.h
include/linux/cpufreq.h
include/linux/devfreq-event.h [new file with mode: 0644]
include/linux/dqblk_v1.h
include/linux/fs.h
include/linux/jbd.h
include/linux/jbd2.h
include/linux/mm.h
include/linux/page-flags.h
include/linux/pci.h
include/linux/pm.h
include/linux/pm_domain.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/resource_ext.h [new file with mode: 0644]
include/uapi/linux/pci_regs.h
include/uapi/linux/quota.h
include/xen/grant_table.h
include/xen/interface/features.h
include/xen/interface/grant_table.h
kernel/power/qos.c
kernel/power/snapshot.c
kernel/resource.c
kernel/trace/power-traces.c
mm/memory.c
sound/oss/dmasound/dmasound_atari.c
tools/power/acpi/common/cmfsize.c
tools/power/acpi/common/getopt.c
tools/power/acpi/os_specific/service_layers/oslibcfs.c
tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
tools/power/acpi/os_specific/service_layers/osunixdir.c
tools/power/acpi/os_specific/service_layers/osunixmap.c
tools/power/acpi/os_specific/service_layers/osunixxf.c
tools/power/acpi/tools/acpidump/acpidump.h
tools/power/acpi/tools/acpidump/apdump.c
tools/power/acpi/tools/acpidump/apfiles.c
tools/power/acpi/tools/acpidump/apmain.c
tools/power/cpupower/Makefile
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c

index b60d2ab..9b121a5 100644 (file)
@@ -243,7 +243,7 @@ input driver:
                        .owner  = THIS_MODULE,
                        .pm     = &mpu3050_pm,
                        .of_match_table = mpu3050_of_match,
-                       .acpi_match_table  ACPI_PTR(mpu3050_acpi_match),
+                       .acpi_match_table = ACPI_PTR(mpu3050_acpi_match),
                },
                .probe          = mpu3050_probe,
                .remove         = mpu3050_remove,
index 765d7fc..6557507 100644 (file)
@@ -37,6 +37,14 @@ controlling P state selection. These files have been added to
       no_turbo: limits the driver to selecting P states below the turbo
       frequency range.
 
+      turbo_pct: displays the percentage of the total performance that
+      is supported by hardware that is in the turbo range.  This number
+      is independent of whether turbo has been disabled or not.
+
+      num_pstates: displays the number of pstates that are supported
+      by hardware.  This number is independent of whether turbo has
+      been disabled or not.
+
 For contemporary Intel processors, the frequency is controlled by the
 processor itself and the P-states exposed to software are related to
 performance levels.  The idea that frequency can be set to a single
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
new file mode 100644 (file)
index 0000000..b54bf3a
--- /dev/null
@@ -0,0 +1,110 @@
+
+* Samsung Exynos PPMU (Platform Performance Monitoring Unit) device
+
+The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
+each IP. PPMU provides the primitive values to get performance data. These
+PPMU events provide information of the SoC's behaviors so that you may
+use to analyze system performance, to make behaviors visible and to count
+usages of each IP (DMC, CPU, RIGHTBUS, LEFTBUS, CAM interface, LCD, G3D, MFC).
+The Exynos PPMU driver uses the devfreq-event class to provide event data
+to various devfreq devices. The devfreq devices would use the event data when
+determining the current state of each IP.
+
+Required properties:
+- compatible: Should be "samsung,exynos-ppmu".
+- reg: physical base address of each PPMU and length of memory mapped region.
+
+Optional properties:
+- clock-names : the name of clock used by the PPMU, "ppmu"
+- clocks : phandles for clock specified in "clock-names" property
+- #clock-cells: should be 1.
+
+Example1 : PPMU nodes in exynos3250.dtsi are listed below.
+
+               ppmu_dmc0: ppmu_dmc0@106a0000 {
+                       compatible = "samsung,exynos-ppmu";
+                       reg = <0x106a0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_dmc1: ppmu_dmc1@106b0000 {
+                       compatible = "samsung,exynos-ppmu";
+                       reg = <0x106b0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_cpu: ppmu_cpu@106c0000 {
+                       compatible = "samsung,exynos-ppmu";
+                       reg = <0x106c0000 0x2000>;
+                       status = "disabled";
+               };
+
+               ppmu_rightbus: ppmu_rightbus@112a0000 {
+                       compatible = "samsung,exynos-ppmu";
+                       reg = <0x112a0000 0x2000>;
+                       clocks = <&cmu CLK_PPMURIGHT>;
+                       clock-names = "ppmu";
+                       status = "disabled";
+               };
+
+               ppmu_leftbus: ppmu_leftbus0@116a0000 {
+                       compatible = "samsung,exynos-ppmu";
+                       reg = <0x116a0000 0x2000>;
+                       clocks = <&cmu CLK_PPMULEFT>;
+                       clock-names = "ppmu";
+                       status = "disabled";
+               };
+
+Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
+
+       &ppmu_dmc0 {
+               status = "okay";
+
+               events {
+                       ppmu_dmc0_3: ppmu-event3-dmc0 {
+                               event-name = "ppmu-event3-dmc0";
+                       };
+
+                       ppmu_dmc0_2: ppmu-event2-dmc0 {
+                               event-name = "ppmu-event2-dmc0";
+                       };
+
+                       ppmu_dmc0_1: ppmu-event1-dmc0 {
+                               event-name = "ppmu-event1-dmc0";
+                       };
+
+                       ppmu_dmc0_0: ppmu-event0-dmc0 {
+                               event-name = "ppmu-event0-dmc0";
+                       };
+               };
+       };
+
+       &ppmu_dmc1 {
+               status = "okay";
+
+               events {
+                       ppmu_dmc1_3: ppmu-event3-dmc1 {
+                               event-name = "ppmu-event3-dmc1";
+                       };
+               };
+       };
+
+       &ppmu_leftbus {
+               status = "okay";
+
+               events {
+                       ppmu_leftbus_3: ppmu-event3-leftbus {
+                               event-name = "ppmu-event3-leftbus";
+                       };
+               };
+       };
+
+       &ppmu_rightbus {
+               status = "okay";
+
+               events {
+                       ppmu_rightbus_3: ppmu-event3-rightbus {
+                               event-name = "ppmu-event3-rightbus";
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/pci/versatile.txt b/Documentation/devicetree/bindings/pci/versatile.txt
new file mode 100644 (file)
index 0000000..ebd1e7d
--- /dev/null
@@ -0,0 +1,59 @@
+* ARM Versatile Platform Baseboard PCI interface
+
+PCI host controller found on the ARM Versatile PB board's FPGA.
+
+Required properties:
+- compatible: should contain "arm,versatile-pci" to identify the Versatile PCI
+  controller.
+- reg: base addresses and lengths of the pci controller. There must be 3
+  entries:
+       - Versatile-specific registers
+       - Self Config space
+       - Config space
+- #address-cells: set to <3>
+- #size-cells: set to <2>
+- device_type: set to "pci"
+- bus-range: set to <0 0xff>
+- ranges: ranges for the PCI memory and I/O regions
+- #interrupt-cells: set to <1>
+- interrupt-map-mask and interrupt-map: standard PCI properties to define
+       the mapping of the PCI interface to interrupt numbers.
+
+Example:
+
+pci-controller@10001000 {
+       compatible = "arm,versatile-pci";
+       device_type = "pci";
+       reg = <0x10001000 0x1000
+              0x41000000 0x10000
+              0x42000000 0x100000>;
+       bus-range = <0 0xff>;
+       #address-cells = <3>;
+       #size-cells = <2>;
+       #interrupt-cells = <1>;
+
+       ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000   /* downstream I/O */
+                 0x02000000 0 0x50000000 0x50000000 0 0x10000000   /* non-prefetchable memory */
+                 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
+
+       interrupt-map-mask = <0x1800 0 0 7>;
+       interrupt-map = <0x1800 0 0 1 &sic 28
+                        0x1800 0 0 2 &sic 29
+                        0x1800 0 0 3 &sic 30
+                        0x1800 0 0 4 &sic 27
+
+                        0x1000 0 0 1 &sic 27
+                        0x1000 0 0 2 &sic 28
+                        0x1000 0 0 3 &sic 29
+                        0x1000 0 0 4 &sic 30
+
+                        0x0800 0 0 1 &sic 30
+                        0x0800 0 0 2 &sic 27
+                        0x0800 0 0 3 &sic 28
+                        0x0800 0 0 4 &sic 29
+
+                        0x0000 0 0 1 &sic 29
+                        0x0000 0 0 2 &sic 30
+                        0x0000 0 0 3 &sic 27
+                        0x0000 0 0 4 &sic 28>;
+};
index 5be51fd..0bfafe1 100644 (file)
@@ -287,9 +287,9 @@ The following sysctls are available for the XFS filesystem:
                XFS_ERRLEVEL_LOW:       1
                XFS_ERRLEVEL_HIGH:      5
 
-  fs.xfs.panic_mask            (Min: 0  Default: 0  Max: 127)
+  fs.xfs.panic_mask            (Min: 0  Default: 0  Max: 255)
        Causes certain error conditions to call BUG(). Value is a bitmask;
-       AND together the tags which represent errors which should cause panics:
+       OR together the tags which represent errors which should cause panics:
 
                XFS_NO_PTAG                     0
                XFS_PTAG_IFLUSH                 0x00000001
@@ -299,6 +299,7 @@ The following sysctls are available for the XFS filesystem:
                XFS_PTAG_SHUTDOWN_CORRUPT       0x00000010
                XFS_PTAG_SHUTDOWN_IOERROR       0x00000020
                XFS_PTAG_SHUTDOWN_LOGERROR      0x00000040
+               XFS_PTAG_FSBLOCK_ZERO           0x00000080
 
        This option is intended for debugging only.
 
@@ -348,16 +349,13 @@ The following sysctls are available for the XFS filesystem:
 Deprecated Sysctls
 ==================
 
-  fs.xfs.xfsbufd_centisecs     (Min: 50  Default: 100  Max: 3000)
-       Dirty metadata is now tracked by the log subsystem and
-       flushing is driven by log space and idling demands. The
-       xfsbufd no longer exists, so this sysctl does nothing.
+None at present.
 
-       Due for removal in 3.14.
 
-  fs.xfs.age_buffer_centisecs  (Min: 100  Default: 1500  Max: 720000)
-       Dirty metadata is now tracked by the log subsystem and
-       flushing is driven by log space and idling demands. The
-       xfsbufd no longer exists, so this sysctl does nothing.
+Removed Sysctls
+===============
 
-       Due for removal in 3.14.
+  Name                         Removed
+  ----                         -------
+  fs.xfs.xfsbufd_centisecs     v3.20
+  fs.xfs.age_buffer_centisecs  v3.20
index 176d4fe..f06f1f6 100644 (file)
@@ -1470,6 +1470,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                       no_hwp
                         Do not enable hardware P state control (HWP)
                         if available.
+               hwp_only
+                       Only load intel_pstate on systems which support
+                       hardware P state control (HWP) if available.
 
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
index 1bdfa04..4685aee 100644 (file)
@@ -69,6 +69,10 @@ Reason for this is that the RTC is the only reliably available piece of
 hardware during resume operations where a value can be set that will
 survive a reboot.
 
+pm_trace is not compatible with asynchronous suspend, so it turns
+asynchronous suspend off (which may work around timing or
+ordering-sensitive bugs).
+
 Consequence is that after a resume (even if it is successful) your system
 clock will have a value corresponding to the magic number instead of the
 correct date/time! It is therefore advisable to use a program like ntp-date
index 26557b7..54c7ce0 100644 (file)
@@ -270,12 +270,12 @@ F:        drivers/acpi/
 F:     drivers/pnp/pnpacpi/
 F:     include/linux/acpi.h
 F:     include/acpi/
-F:     Documentation/acpi
+F:     Documentation/acpi/
 F:     Documentation/ABI/testing/sysfs-bus-acpi
 F:     drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
 F:     drivers/pci/*/*/*acpi*
-F:     tools/power/acpi
+F:     tools/power/acpi/
 
 ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
@@ -7277,6 +7277,14 @@ F:       include/linux/pci*
 F:     arch/x86/pci/
 F:     arch/x86/kernel/quirks.c
 
+PCI DRIVER FOR ARM VERSATILE PLATFORM
+M:     Rob Herring <robh@kernel.org>
+L:     linux-pci@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/pci/versatile.txt
+F:     drivers/pci/host/pci-versatile.c
+
 PCI DRIVER FOR APPLIEDMICRO XGENE
 M:     Tanmay Inamdar <tinamdar@apm.com>
 L:     linux-pci@vger.kernel.org
index 97d07ed..dcb2e0c 100644 (file)
@@ -1279,6 +1279,9 @@ config PCI_DOMAINS
        bool
        depends on PCI
 
+config PCI_DOMAINS_GENERIC
+       def_bool PCI_DOMAINS
+
 config PCI_NANOENGINE
        bool "BSE nanoEngine PCI support"
        depends on SA1100_NANOENGINE
index e36c1e8..b83137f 100644 (file)
                        clock-names = "apb_pclk";
                };
 
+               pci-controller@10001000 {
+                       compatible = "arm,versatile-pci";
+                       device_type = "pci";
+                       reg = <0x10001000 0x1000
+                              0x41000000 0x10000
+                              0x42000000 0x100000>;
+                       bus-range = <0 0xff>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       #interrupt-cells = <1>;
+
+                       ranges = <0x01000000 0 0x00000000 0x43000000 0 0x00010000   /* downstream I/O */
+                                 0x02000000 0 0x50000000 0x50000000 0 0x10000000   /* non-prefetchable memory */
+                                 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
+
+                       interrupt-map-mask = <0x1800 0 0 7>;
+                       interrupt-map = <0x1800 0 0 1 &sic 28
+                                        0x1800 0 0 2 &sic 29
+                                        0x1800 0 0 3 &sic 30
+                                        0x1800 0 0 4 &sic 27
+
+                                        0x1000 0 0 1 &sic 27
+                                        0x1000 0 0 2 &sic 28
+                                        0x1000 0 0 3 &sic 29
+                                        0x1000 0 0 4 &sic 30
+
+                                        0x0800 0 0 1 &sic 30
+                                        0x0800 0 0 2 &sic 27
+                                        0x0800 0 0 3 &sic 28
+                                        0x0800 0 0 4 &sic 29
+
+                                        0x0000 0 0 1 &sic 29
+                                        0x0000 0 0 2 &sic 30
+                                        0x0000 0 0 3 &sic 27
+                                        0x0000 0 0 4 &sic 28>;
+               };
+
                fpga {
                        uart@9000 {
                                compatible = "arm,pl011", "arm,primecell";
index 8292b5f..28b9bb3 100644 (file)
@@ -19,9 +19,6 @@ struct pci_bus;
 struct device;
 
 struct hw_pci {
-#ifdef CONFIG_PCI_DOMAINS
-       int             domain;
-#endif
 #ifdef CONFIG_PCI_MSI
        struct msi_controller *msi_ctrl;
 #endif
@@ -45,9 +42,6 @@ struct hw_pci {
  * Per-controller structure
  */
 struct pci_sys_data {
-#ifdef CONFIG_PCI_DOMAINS
-       int             domain;
-#endif
 #ifdef CONFIG_PCI_MSI
        struct msi_controller *msi_ctrl;
 #endif
index 7e95d85..585dc33 100644 (file)
@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void)
 }
 
 #ifdef CONFIG_PCI_DOMAINS
-static inline int pci_domain_nr(struct pci_bus *bus)
-{
-       struct pci_sys_data *root = bus->sysdata;
-
-       return root->domain;
-}
-
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
        return pci_domain_nr(bus);
index 68c739b..2f7e6ff 100644 (file)
@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct page **pages, unsigned int count);
 
 extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-                                    struct gnttab_map_grant_ref *kmap_ops,
+                                    struct gnttab_unmap_grant_ref *kunmap_ops,
                                     struct page **pages, unsigned int count);
 
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
index a4effd6..ab19b7c 100644 (file)
@@ -422,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
        int ret;
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
 
        if (list_empty(&sys->resources)) {
                pci_add_resource_offset(&sys->resources,
                         &iomem_resource, sys->mem_offset);
        }
 
-       list_for_each_entry(window, &sys->resources, list) {
+       resource_list_for_each_entry(window, &sys->resources)
                if (resource_type(window->res) == IORESOURCE_IO)
                        return 0;
-       }
 
        sys->io_res.start = (busnr * SZ_64K) ?  : pcibios_min_io;
        sys->io_res.end = (busnr + 1) * SZ_64K - 1;
@@ -463,9 +462,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                if (!sys)
                        panic("PCI: unable to allocate sys data!");
 
-#ifdef CONFIG_PCI_DOMAINS
-               sys->domain  = hw->domain;
-#endif
 #ifdef CONFIG_PCI_MSI
                sys->msi_ctrl = hw->msi_ctrl;
 #endif
index 45d6bd0..c622c30 100644 (file)
@@ -30,18 +30,15 @@ struct cns3xxx_pcie {
        unsigned int irqs[2];
        struct resource res_io;
        struct resource res_mem;
-       struct hw_pci hw_pci;
-
+       int port;
        bool linked;
 };
 
-static struct cns3xxx_pcie cns3xxx_pcie[]; /* forward decl. */
-
 static struct cns3xxx_pcie *sysdata_to_cnspci(void *sysdata)
 {
        struct pci_sys_data *root = sysdata;
 
-       return &cns3xxx_pcie[root->domain];
+       return root->private_data;
 }
 
 static struct cns3xxx_pcie *pdev_to_cnspci(const struct pci_dev *dev)
@@ -54,8 +51,8 @@ static struct cns3xxx_pcie *pbus_to_cnspci(struct pci_bus *bus)
        return sysdata_to_cnspci(bus->sysdata);
 }
 
-static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
-                                 unsigned int devfn, int where)
+static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
+                                        unsigned int devfn, int where)
 {
        struct cns3xxx_pcie *cnspci = pbus_to_cnspci(bus);
        int busno = bus->number;
@@ -91,55 +88,22 @@ static void __iomem *cns3xxx_pci_cfg_base(struct pci_bus *bus,
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
                                   int where, int size, u32 *val)
 {
-       u32 v;
-       void __iomem *base;
+       int ret;
        u32 mask = (0x1ull << (size * 8)) - 1;
        int shift = (where % 4) * 8;
 
-       base = cns3xxx_pci_cfg_base(bus, devfn, where);
-       if (!base) {
-               *val = 0xffffffff;
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       v = __raw_readl(base);
+       ret = pci_generic_config_read32(bus, devfn, where, size, val);
 
-       if (bus->number == 0 && devfn == 0 &&
-                       (where & 0xffc) == PCI_CLASS_REVISION) {
+       if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn &&
+           (where & 0xffc) == PCI_CLASS_REVISION)
                /*
                 * RC's class is 0xb, but Linux PCI driver needs 0x604
                 * for a PCIe bridge. So we must fixup the class code
                 * to 0x604 here.
                 */
-               v &= 0xff;
-               v |= 0x604 << 16;
-       }
+               *val = ((((*val << shift) & 0xff) | (0x604 << 16)) >> shift) & mask;
 
-       *val = (v >> shift) & mask;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int cns3xxx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
-                                   int where, int size, u32 val)
-{
-       u32 v;
-       void __iomem *base;
-       u32 mask = (0x1ull << (size * 8)) - 1;
-       int shift = (where % 4) * 8;
-
-       base = cns3xxx_pci_cfg_base(bus, devfn, where);
-       if (!base)
-               return PCIBIOS_SUCCESSFUL;
-
-       v = __raw_readl(base);
-
-       v &= ~(mask << shift);
-       v |= (val & mask) << shift;
-
-       __raw_writel(v, base);
-
-       return PCIBIOS_SUCCESSFUL;
+       return ret;
 }
 
 static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
@@ -158,8 +122,9 @@ static int cns3xxx_pci_setup(int nr, struct pci_sys_data *sys)
 }
 
 static struct pci_ops cns3xxx_pcie_ops = {
+       .map_bus = cns3xxx_pci_map_bus,
        .read = cns3xxx_pci_read_config,
-       .write = cns3xxx_pci_write_config,
+       .write = pci_generic_config_write,
 };
 
 static int cns3xxx_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -192,13 +157,7 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
                        .flags = IORESOURCE_MEM,
                },
                .irqs = { IRQ_CNS3XXX_PCIE0_RC, IRQ_CNS3XXX_PCIE0_DEVICE, },
-               .hw_pci = {
-                       .domain = 0,
-                       .nr_controllers = 1,
-                       .ops = &cns3xxx_pcie_ops,
-                       .setup = cns3xxx_pci_setup,
-                       .map_irq = cns3xxx_pcie_map_irq,
-               },
+               .port = 0,
        },
        [1] = {
                .host_regs = (void __iomem *)CNS3XXX_PCIE1_HOST_BASE_VIRT,
@@ -217,19 +176,13 @@ static struct cns3xxx_pcie cns3xxx_pcie[] = {
                        .flags = IORESOURCE_MEM,
                },
                .irqs = { IRQ_CNS3XXX_PCIE1_RC, IRQ_CNS3XXX_PCIE1_DEVICE, },
-               .hw_pci = {
-                       .domain = 1,
-                       .nr_controllers = 1,
-                       .ops = &cns3xxx_pcie_ops,
-                       .setup = cns3xxx_pci_setup,
-                       .map_irq = cns3xxx_pcie_map_irq,
-               },
+               .port = 1,
        },
 };
 
 static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
 {
-       int port = cnspci->hw_pci.domain;
+       int port = cnspci->port;
        u32 reg;
        unsigned long time;
 
@@ -260,9 +213,9 @@ static void __init cns3xxx_pcie_check_link(struct cns3xxx_pcie *cnspci)
 
 static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci)
 {
-       int port = cnspci->hw_pci.domain;
+       int port = cnspci->port;
        struct pci_sys_data sd = {
-               .domain = port,
+               .private_data = cnspci,
        };
        struct pci_bus bus = {
                .number = 0,
@@ -323,6 +276,14 @@ static int cns3xxx_pcie_abort_handler(unsigned long addr, unsigned int fsr,
 void __init cns3xxx_pcie_init_late(void)
 {
        int i;
+       void *private_data;
+       struct hw_pci hw_pci = {
+              .nr_controllers = 1,
+              .ops = &cns3xxx_pcie_ops,
+              .setup = cns3xxx_pci_setup,
+              .map_irq = cns3xxx_pcie_map_irq,
+              .private_data = &private_data,
+       };
 
        pcibios_min_io = 0;
        pcibios_min_mem = 0;
@@ -335,7 +296,8 @@ void __init cns3xxx_pcie_init_late(void)
                cns3xxx_pwr_soft_rst(0x1 << PM_SOFT_RST_REG_OFFST_PCIE(i));
                cns3xxx_pcie_check_link(&cns3xxx_pcie[i]);
                cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]);
-               pci_common_init(&cns3xxx_pcie[i].hw_pci);
+               private_data = &cns3xxx_pcie[i];
+               pci_common_init(&hw_pci);
        }
 
        pci_assign_unassigned_resources();
index c186a17..2565f0e 100644 (file)
@@ -356,7 +356,6 @@ static u64 pre_mem_pci_sz;
  *      7:2    register number
  *
  */
-static DEFINE_RAW_SPINLOCK(v3_lock);
 
 #undef V3_LB_BASE_PREFETCH
 #define V3_LB_BASE_PREFETCH 0
@@ -457,67 +456,21 @@ static void v3_close_config_window(void)
 static int v3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                          int size, u32 *val)
 {
-       void __iomem *addr;
-       unsigned long flags;
-       u32 v;
-
-       raw_spin_lock_irqsave(&v3_lock, flags);
-       addr = v3_open_config_window(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               v = __raw_readb(addr);
-               break;
-
-       case 2:
-               v = __raw_readw(addr);
-               break;
-
-       default:
-               v = __raw_readl(addr);
-               break;
-       }
-
+       int ret = pci_generic_config_read(bus, devfn, where, size, val);
        v3_close_config_window();
-       raw_spin_unlock_irqrestore(&v3_lock, flags);
-
-       *val = v;
-       return PCIBIOS_SUCCESSFUL;
+       return ret;
 }
 
 static int v3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                           int size, u32 val)
 {
-       void __iomem *addr;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&v3_lock, flags);
-       addr = v3_open_config_window(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               __raw_writeb((u8)val, addr);
-               __raw_readb(addr);
-               break;
-
-       case 2:
-               __raw_writew((u16)val, addr);
-               __raw_readw(addr);
-               break;
-
-       case 4:
-               __raw_writel(val, addr);
-               __raw_readl(addr);
-               break;
-       }
-
+       int ret = pci_generic_config_write(bus, devfn, where, size, val);
        v3_close_config_window();
-       raw_spin_unlock_irqrestore(&v3_lock, flags);
-
-       return PCIBIOS_SUCCESSFUL;
+       return ret;
 }
 
 static struct pci_ops pci_v3_ops = {
+       .map_bus = v3_open_config_window,
        .read   = v3_read_config,
        .write  = v3_write_config,
 };
@@ -658,7 +611,6 @@ static int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
  */
 static void __init pci_v3_preinit(void)
 {
-       unsigned long flags;
        unsigned int temp;
        phys_addr_t io_address = pci_pio_to_address(io_mem.start);
 
@@ -672,8 +624,6 @@ static void __init pci_v3_preinit(void)
        hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
        hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch");
 
-       raw_spin_lock_irqsave(&v3_lock, flags);
-
        /*
         * Unlock V3 registers, but only if they were previously locked.
         */
@@ -736,8 +686,6 @@ static void __init pci_v3_preinit(void)
        v3_writew(V3_LB_CFG, v3_readw(V3_LB_CFG) | (1 << 10));
        v3_writeb(V3_LB_IMASK, 0x28);
        __raw_writel(3, ap_syscon_base + INTEGRATOR_SC_PCIENABLE_OFFSET);
-
-       raw_spin_unlock_irqrestore(&v3_lock, flags);
 }
 
 static void __init pci_v3_postinit(void)
index bb18193..c1bc4c3 100644 (file)
@@ -38,8 +38,6 @@
 
 
 static int pci_dbg;
-static int pci_cfg_dbg;
-
 
 static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsigned int where)
 {
@@ -59,75 +57,11 @@ static void ks8695_pci_setupconfig(unsigned int bus_nr, unsigned int devfn, unsi
        }
 }
 
-
-/*
- * The KS8695 datasheet prohibits anything other than 32bit accesses
- * to the IO registers, so all our configuration must be done with
- * 32bit operations, and the correct bit masking and shifting.
- */
-
-static int ks8695_pci_readconfig(struct pci_bus *bus,
-                       unsigned int devfn, int where, int size, u32 *value)
-{
-       ks8695_pci_setupconfig(bus->number, devfn, where);
-
-       *value = __raw_readl(KS8695_PCI_VA +  KS8695_PBCD);
-
-       switch (size) {
-               case 4:
-                       break;
-               case 2:
-                       *value = *value >> ((where & 2) * 8);
-                       *value &= 0xffff;
-                       break;
-               case 1:
-                       *value = *value >> ((where & 3) * 8);
-                       *value &= 0xff;
-                       break;
-       }
-
-       if (pci_cfg_dbg) {
-               printk("read: %d,%08x,%02x,%d: %08x (%08x)\n",
-                       bus->number, devfn, where, size, *value,
-                       __raw_readl(KS8695_PCI_VA +  KS8695_PBCD));
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int ks8695_pci_writeconfig(struct pci_bus *bus,
-                       unsigned int devfn, int where, int size, u32 value)
+static void __iomem *ks8695_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+                                       int where)
 {
-       unsigned long tmp;
-
-       if (pci_cfg_dbg) {
-               printk("write: %d,%08x,%02x,%d: %08x\n",
-                       bus->number, devfn, where, size, value);
-       }
-
        ks8695_pci_setupconfig(bus->number, devfn, where);
-
-       switch (size) {
-               case 4:
-                       __raw_writel(value, KS8695_PCI_VA +  KS8695_PBCD);
-                       break;
-               case 2:
-                       tmp = __raw_readl(KS8695_PCI_VA +  KS8695_PBCD);
-                       tmp &= ~(0xffff << ((where & 2) * 8));
-                       tmp |= value << ((where & 2) * 8);
-
-                       __raw_writel(tmp, KS8695_PCI_VA +  KS8695_PBCD);
-                       break;
-               case 1:
-                       tmp = __raw_readl(KS8695_PCI_VA +  KS8695_PBCD);
-                       tmp &= ~(0xff << ((where & 3) * 8));
-                       tmp |= value << ((where & 3) * 8);
-
-                       __raw_writel(tmp, KS8695_PCI_VA +  KS8695_PBCD);
-                       break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
+       return KS8695_PCI_VA +  KS8695_PBCD;
 }
 
 static void ks8695_local_writeconfig(int where, u32 value)
@@ -137,8 +71,9 @@ static void ks8695_local_writeconfig(int where, u32 value)
 }
 
 static struct pci_ops ks8695_pci_ops = {
-       .read   = ks8695_pci_readconfig,
-       .write  = ks8695_pci_writeconfig,
+       .map_bus = ks8695_pci_map_bus,
+       .read   = pci_generic_config_read32,
+       .write  = pci_generic_config_write32,
 };
 
 static struct resource pci_mem = {
index b704433..d7ae8d5 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/kernel.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
-#include <linux/spinlock.h>
 
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 #include <mach/nanoengine.h>
 #include <mach/hardware.h>
 
-static DEFINE_SPINLOCK(nano_lock);
-
-static int nanoengine_get_pci_address(struct pci_bus *bus,
-       unsigned int devfn, int where, void __iomem **address)
+static void __iomem *nanoengine_pci_map_bus(struct pci_bus *bus,
+                                           unsigned int devfn, int where)
 {
-       int ret = PCIBIOS_DEVICE_NOT_FOUND;
-       unsigned int busnr = bus->number;
+       if (bus->number != 0 || (devfn >> 3) != 0)
+               return NULL;
 
-       *address = (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
+       return (void __iomem *)NANO_PCI_CONFIG_SPACE_VIRT +
                ((bus->number << 16) | (devfn << 8) | (where & ~3));
-
-       ret = (busnr > 255 || devfn > 255 || where > 255) ?
-               PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-       return ret;
-}
-
-static int nanoengine_read_config(struct pci_bus *bus, unsigned int devfn, int where,
-       int size, u32 *val)
-{
-       int ret;
-       void __iomem *address;
-       unsigned long flags;
-       u32 v;
-
-       /* nanoEngine PCI bridge does not return -1 for a non-existing
-        * device. We must fake the answer. We know that the only valid
-        * device is device zero at bus 0, which is the network chip. */
-       if (bus->number != 0 || (devfn >> 3) != 0) {
-               v = -1;
-               nanoengine_get_pci_address(bus, devfn, where, &address);
-               goto exit_function;
-       }
-
-       spin_lock_irqsave(&nano_lock, flags);
-
-       ret = nanoengine_get_pci_address(bus, devfn, where, &address);
-       if (ret != PCIBIOS_SUCCESSFUL)
-               return ret;
-       v = __raw_readl(address);
-
-       spin_unlock_irqrestore(&nano_lock, flags);
-
-       v >>= ((where & 3) * 8);
-       v &= (unsigned long)(-1) >> ((4 - size) * 8);
-
-exit_function:
-       *val = v;
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int nanoengine_write_config(struct pci_bus *bus, unsigned int devfn, int where,
-       int size, u32 val)
-{
-       int ret;
-       void __iomem *address;
-       unsigned long flags;
-       unsigned shift;
-       u32 v;
-
-       shift = (where & 3) * 8;
-
-       spin_lock_irqsave(&nano_lock, flags);
-
-       ret = nanoengine_get_pci_address(bus, devfn, where, &address);
-       if (ret != PCIBIOS_SUCCESSFUL)
-               return ret;
-       v = __raw_readl(address);
-       switch (size) {
-       case 1:
-               v &= ~(0xFF << shift);
-               v |= val << shift;
-               break;
-       case 2:
-               v &= ~(0xFFFF << shift);
-               v |= val << shift;
-               break;
-       case 4:
-               v = val;
-               break;
-       }
-       __raw_writel(v, address);
-
-       spin_unlock_irqrestore(&nano_lock, flags);
-
-       return PCIBIOS_SUCCESSFUL;
 }
 
 static struct pci_ops pci_nano_ops = {
-       .read   = nanoengine_read_config,
-       .write  = nanoengine_write_config,
+       .map_bus = nanoengine_pci_map_bus,
+       .read   = pci_generic_config_read32,
+       .write  = pci_generic_config_write32,
 };
 
 static int __init pci_nanoengine_map_irq(const struct pci_dev *dev, u8 slot,
index c7ca936..263a204 100644 (file)
 
 struct start_info _xen_start_info;
 struct start_info *xen_start_info = &_xen_start_info;
-EXPORT_SYMBOL_GPL(xen_start_info);
+EXPORT_SYMBOL(xen_start_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
-EXPORT_SYMBOL_GPL(xen_domain_type);
+EXPORT_SYMBOL(xen_domain_type);
 
 struct shared_info xen_dummy_shared_info;
 struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
index 351b24a..793551d 100644 (file)
@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
 struct dma_map_ops *xen_dma_ops;
-EXPORT_SYMBOL_GPL(xen_dma_ops);
+EXPORT_SYMBOL(xen_dma_ops);
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
        .mapping_error = xen_swiotlb_dma_mapping_error,
index 0548577..cb7a14c 100644 (file)
@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-                             struct gnttab_map_grant_ref *kmap_ops,
+                             struct gnttab_unmap_grant_ref *kunmap_ops,
                              struct page **pages, unsigned int count)
 {
        int i;
index ce5836c..6f93c24 100644 (file)
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
 
        return 0;
 }
-
-
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
-static bool dt_domain_found = false;
-
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
-{
-       int domain = of_get_pci_domain_nr(parent->of_node);
-
-       if (domain >= 0) {
-               dt_domain_found = true;
-       } else if (dt_domain_found == true) {
-               dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
-                       parent->of_node->full_name);
-               return;
-       } else {
-               domain = pci_get_new_domain_nr();
-       }
-
-       bus->domain_nr = domain;
-}
-#endif
index efa5d65..b073f4d 100644 (file)
@@ -168,8 +168,8 @@ static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int whe
 }
 
 static struct pci_ops pci_direct_frv = {
-       pci_frv_read_config,
-       pci_frv_write_config,
+       .read = pci_frv_read_config,
+       .write = pci_frv_write_config,
 };
 
 /*
index 8b9318d..bd09bf7 100644 (file)
@@ -69,10 +69,10 @@ static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
        status = acpi_resource_to_address64(resource, &addr);
        if (ACPI_SUCCESS(status) &&
            addr.resource_type == ACPI_MEMORY_RANGE &&
-           addr.address_length &&
+           addr.address.address_length &&
            addr.producer_consumer == ACPI_CONSUMER) {
-               space->base = addr.minimum;
-               space->length = addr.address_length;
+               space->base = addr.address.minimum;
+               space->length = addr.address.address_length;
                return AE_CTRL_TERMINATE;
        }
        return AE_OK;           /* keep looking */
index e795cb8..2c44989 100644 (file)
@@ -380,9 +380,6 @@ static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 static int __init acpi_parse_madt(struct acpi_table_header *table)
 {
-       if (!table)
-               return -EINVAL;
-
        acpi_madt = (struct acpi_table_madt *)table;
 
        acpi_madt_rev = acpi_madt->header.revision;
@@ -645,9 +642,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
        struct acpi_table_header *fadt_header;
        struct acpi_table_fadt *fadt;
 
-       if (!table)
-               return -EINVAL;
-
        fadt_header = (struct acpi_table_header *)table;
        if (fadt_header->revision != 3)
                return -ENODEV; /* Only deal with ACPI 2.0 FADT */
index 900cc93..48cc657 100644 (file)
@@ -188,12 +188,12 @@ static u64 add_io_space(struct pci_root_info *info,
 
        name = (char *)(iospace + 1);
 
-       min = addr->minimum;
-       max = min + addr->address_length - 1;
+       min = addr->address.minimum;
+       max = min + addr->address.address_length - 1;
        if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
                sparse = 1;
 
-       space_nr = new_space(addr->translation_offset, sparse);
+       space_nr = new_space(addr->address.translation_offset, sparse);
        if (space_nr == ~0)
                goto free_resource;
 
@@ -247,7 +247,7 @@ static acpi_status resource_to_window(struct acpi_resource *resource,
        if (ACPI_SUCCESS(status) &&
            (addr->resource_type == ACPI_MEMORY_RANGE ||
             addr->resource_type == ACPI_IO_RANGE) &&
-           addr->address_length &&
+           addr->address.address_length &&
            addr->producer_consumer == ACPI_PRODUCER)
                return AE_OK;
 
@@ -284,7 +284,7 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
        if (addr.resource_type == ACPI_MEMORY_RANGE) {
                flags = IORESOURCE_MEM;
                root = &iomem_resource;
-               offset = addr.translation_offset;
+               offset = addr.address.translation_offset;
        } else if (addr.resource_type == ACPI_IO_RANGE) {
                flags = IORESOURCE_IO;
                root = &ioport_resource;
@@ -297,8 +297,8 @@ static acpi_status add_window(struct acpi_resource *res, void *data)
        resource = &info->res[info->res_num];
        resource->name = info->name;
        resource->flags = flags;
-       resource->start = addr.minimum + offset;
-       resource->end = resource->start + addr.address_length - 1;
+       resource->start = addr.address.minimum + offset;
+       resource->end = resource->start + addr.address.address_length - 1;
        info->res_offset[info->res_num] = offset;
 
        if (insert_resource(root, resource)) {
index 95022b0..264db11 100644 (file)
@@ -170,7 +170,6 @@ repeat:
        if (acia_stat & ACIA_RDRF) {
                /* received a character */
                scancode = acia.key_data;       /* get it or reset the ACIA, I'll get it! */
-               tasklet_schedule(&keyboard_tasklet);
        interpret_scancode:
                switch (kb_state.state) {
                case KEYBOARD:
@@ -430,14 +429,6 @@ void ikbd_mouse_y0_top(void)
 }
 EXPORT_SYMBOL(ikbd_mouse_y0_top);
 
-/* Resume */
-void ikbd_resume(void)
-{
-       static const char cmd[1] = { 0x11 };
-
-       ikbd_write(cmd, 1);
-}
-
 /* Disable mouse */
 void ikbd_mouse_disable(void)
 {
@@ -447,14 +438,6 @@ void ikbd_mouse_disable(void)
 }
 EXPORT_SYMBOL(ikbd_mouse_disable);
 
-/* Pause output */
-void ikbd_pause(void)
-{
-       static const char cmd[1] = { 0x13 };
-
-       ikbd_write(cmd, 1);
-}
-
 /* Set joystick event reporting */
 void ikbd_joystick_event_on(void)
 {
@@ -502,56 +485,6 @@ void ikbd_joystick_disable(void)
        ikbd_write(cmd, 1);
 }
 
-/* Time-of-day clock set */
-void ikbd_clock_set(int year, int month, int day, int hour, int minute, int second)
-{
-       char cmd[7] = { 0x1B, year, month, day, hour, minute, second };
-
-       ikbd_write(cmd, 7);
-}
-
-/* Interrogate time-of-day clock */
-void ikbd_clock_get(int *year, int *month, int *day, int *hour, int *minute, int second)
-{
-       static const char cmd[1] = { 0x1C };
-
-       ikbd_write(cmd, 1);
-}
-
-/* Memory load */
-void ikbd_mem_write(int address, int size, char *data)
-{
-       panic("Attempt to write data into keyboard memory");
-}
-
-/* Memory read */
-void ikbd_mem_read(int address, char data[6])
-{
-       char cmd[3] = { 0x21, address>>8, address&0xFF };
-
-       ikbd_write(cmd, 3);
-
-       /* receive data and put it in data */
-}
-
-/* Controller execute */
-void ikbd_exec(int address)
-{
-       char cmd[3] = { 0x22, address>>8, address&0xFF };
-
-       ikbd_write(cmd, 3);
-}
-
-/* Status inquiries (0x87-0x9A) not yet implemented */
-
-/* Set the state of the caps lock led. */
-void atari_kbd_leds(unsigned int leds)
-{
-       char cmd[6] = {32, 0, 4, 1, 254 + ((leds & 4) != 0), 0};
-
-       ikbd_write(cmd, 6);
-}
-
 /*
  * The original code sometimes left the interrupt line of
  * the ACIAs low forever. I hope, it is fixed now.
@@ -571,9 +504,8 @@ int atari_keyb_init(void)
        kb_state.state = KEYBOARD;
        kb_state.len = 0;
 
-       error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
-                           IRQ_TYPE_SLOW, "keyboard,mouse,MIDI",
-                           atari_keyboard_interrupt);
+       error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, 0,
+                           "keyboard,mouse,MIDI", atari_keyboard_interrupt);
        if (error)
                return error;
 
index e5a6659..ba65f94 100644 (file)
@@ -198,7 +198,7 @@ EXPORT_SYMBOL(stdma_islocked);
 void __init stdma_init(void)
 {
        stdma_isr = NULL;
-       if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
+       if (request_irq(IRQ_MFP_FDC, stdma_int, IRQF_SHARED,
                        "ST-DMA floppy,ACSI,IDE,Falcon-SCSI", stdma_int))
                pr_err("Couldn't register ST-DMA interrupt\n");
 }
index da8f981..c549b48 100644 (file)
@@ -32,8 +32,7 @@ atari_sched_init(irq_handler_t timer_routine)
     /* start timer C, div = 1:100 */
     st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
     /* install interrupt service routine for MFP Timer C */
-    if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
-                   "timer", timer_routine))
+    if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
        pr_err("Couldn't register timer interrupt\n");
 }
 
index 399df88..1a10a08 100644 (file)
@@ -36,6 +36,7 @@ CONFIG_AMIGA_PCMCIA=y
 CONFIG_ZORRO_NAMES=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -55,6 +56,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -96,6 +99,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -142,6 +147,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -163,6 +169,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -170,9 +177,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -181,8 +191,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -197,6 +206,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -213,17 +224,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_AMIGA=m
@@ -299,6 +337,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -316,6 +357,8 @@ CONFIG_ARIADNE=y
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -371,6 +414,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MSM6242=m
@@ -392,6 +436,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -407,6 +452,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -476,10 +522,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -514,13 +568,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index be16740..7859a73 100644 (file)
@@ -34,6 +34,7 @@ CONFIG_M68060=y
 CONFIG_APOLLO=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -291,6 +332,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -332,6 +375,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -350,6 +394,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -365,6 +410,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -434,10 +480,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -472,13 +526,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 391e185..372593a 100644 (file)
@@ -31,8 +31,10 @@ CONFIG_M68030=y
 CONFIG_M68040=y
 CONFIG_M68060=y
 CONFIG_ATARI=y
+CONFIG_ATARI_ROM_ISA=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -52,6 +54,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -93,6 +97,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -139,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -229,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_ATARI=m
@@ -289,6 +328,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -299,8 +341,12 @@ CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_NE2000=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
@@ -345,6 +391,7 @@ CONFIG_DMASOUND_ATARI=m
 CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
+# CONFIG_HID_PLANTRONICS is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
 # CONFIG_IOMMU_SUPPORT is not set
@@ -354,6 +401,8 @@ CONFIG_NATFEAT=y
 CONFIG_NFBLOCK=y
 CONFIG_NFCON=y
 CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
@@ -367,6 +416,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -382,6 +432,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -451,10 +502,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -489,13 +548,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index d0e705d..f3bd35e 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_VME=y
 CONFIG_BVME6000=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index fdc7e96..9f9793f 100644 (file)
@@ -34,6 +34,7 @@ CONFIG_M68060=y
 CONFIG_HP300=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -53,6 +54,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -94,6 +97,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -140,6 +145,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -161,6 +167,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -168,9 +175,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -179,8 +189,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -195,6 +204,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -211,17 +222,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -230,9 +267,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -281,6 +319,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -292,6 +333,8 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -335,6 +378,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -352,6 +396,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -367,6 +412,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -436,10 +482,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -474,13 +528,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 3d34564..89f225c 100644 (file)
@@ -33,6 +33,7 @@ CONFIG_M68KFPU_EMU=y
 CONFIG_MAC=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -52,6 +53,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -93,6 +96,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -139,6 +144,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -160,6 +166,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -167,9 +174,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -178,8 +188,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -194,6 +203,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -210,20 +221,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -232,9 +269,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_SWIM=m
 CONFIG_BLK_DEV_LOOP=y
@@ -297,6 +335,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -310,6 +351,8 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
 CONFIG_MAC8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -357,6 +400,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -374,6 +418,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -389,6 +434,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -458,11 +504,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -497,13 +550,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 59aa420..d3cdb54 100644 (file)
@@ -39,9 +39,11 @@ CONFIG_SUN3X=y
 CONFIG_Q40=y
 CONFIG_ZORRO=y
 CONFIG_AMIGA_PCMCIA=y
+CONFIG_ATARI_ROM_ISA=y
 CONFIG_ZORRO_NAMES=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -61,6 +63,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -102,6 +106,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -148,6 +154,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -169,6 +176,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -176,9 +184,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -187,8 +198,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -203,6 +213,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -219,20 +231,46 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -241,9 +279,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -329,6 +368,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -352,11 +394,14 @@ CONFIG_MVME16x_NET=y
 CONFIG_MACSONIC=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
-CONFIG_NE2000=m
+CONFIG_NE2000=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
@@ -423,6 +468,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MSM6242=m
@@ -435,6 +481,8 @@ CONFIG_NATFEAT=y
 CONFIG_NFBLOCK=y
 CONFIG_NFCON=y
 CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
 CONFIG_ATARI_DSP56K=m
 CONFIG_AMIGA_BUILTIN_SERIAL=y
 CONFIG_SERIAL_CONSOLE=y
@@ -450,6 +498,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -465,6 +514,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -534,11 +584,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -573,13 +630,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 066b24a..b4c7664 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_VME=y
 CONFIG_MVME147=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -50,6 +51,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -91,6 +94,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -137,6 +142,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -158,6 +164,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -165,9 +172,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -176,8 +186,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -192,6 +201,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -208,17 +219,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -227,9 +264,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -279,6 +317,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -427,10 +473,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -465,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 9326ea6..0d4a26f 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_VME=y
 CONFIG_MVME16x=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -280,6 +318,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -290,6 +331,8 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -326,6 +369,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -343,6 +387,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -358,6 +403,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -427,11 +473,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index d7d1101..5d581c5 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_M68060=y
 CONFIG_Q40=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -51,6 +52,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -92,6 +95,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -138,6 +143,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -159,6 +165,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -166,9 +173,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -177,8 +187,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -193,6 +202,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -209,17 +220,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -228,9 +265,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_PARPORT=m
 CONFIG_PARPORT_PC=m
@@ -286,6 +324,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -300,6 +341,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=m
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
@@ -347,6 +390,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -365,6 +409,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -380,6 +425,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -449,10 +495,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -487,13 +541,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 98522e8..c6b49a4 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -287,6 +328,8 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -428,10 +474,17 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -466,13 +519,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 5128a8c..b65785e 100644 (file)
@@ -29,6 +29,7 @@ CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
@@ -48,6 +49,8 @@ CONFIG_NET_IPIP=m
 CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
+CONFIG_NET_FOU_IP_TUNNELS=y
+CONFIG_GENEVE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +92,8 @@ CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
@@ -135,6 +140,7 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m
 CONFIG_NETFILTER_XT_MATCH_OSF=m
 CONFIG_NETFILTER_XT_MATCH_OWNER=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
 CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
@@ -156,6 +162,7 @@ CONFIG_IP_SET_HASH_IPMARK=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_MAC=m
 CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
 CONFIG_IP_SET_HASH_NETNET=m
@@ -163,9 +170,12 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_MASQ_IPV4=m
+CONFIG_NFT_REDIR_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -174,8 +184,7 @@ CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
 CONFIG_IP_NF_TARGET_SYNPROXY=m
-CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_NAT=m
 CONFIG_IP_NF_TARGET_MASQUERADE=m
 CONFIG_IP_NF_TARGET_NETMAP=m
 CONFIG_IP_NF_TARGET_REDIRECT=m
@@ -190,6 +199,8 @@ CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_MASQ_IPV6=m
+CONFIG_NFT_REDIR_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -206,17 +217,43 @@ CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_TARGET_SYNPROXY=m
 CONFIG_IP6_NF_MANGLE=m
 CONFIG_IP6_NF_RAW=m
-CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_DCCP=m
 # CONFIG_IP_DCCP_CCID3 is not set
 CONFIG_SCTP_COOKIE_HMAC_SHA1=y
 CONFIG_RDS=m
 CONFIG_RDS_TCP=m
 CONFIG_L2TP=m
+CONFIG_BRIDGE=m
 CONFIG_ATALK=m
+CONFIG_6LOWPAN=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -225,9 +262,10 @@ CONFIG_BATMAN_ADV_MCAST=y
 CONFIG_NETLINK_DIAG=m
 CONFIG_NET_MPLS_GSO=m
 # CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_FW_LOADER_USER_HELPER is not set
 CONFIG_CONNECTOR=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -277,6 +315,9 @@ CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
 CONFIG_NET_TEAM_MODE_RANDOM=m
 CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
 CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -288,6 +329,8 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
@@ -327,6 +370,7 @@ CONFIG_HID=m
 CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
+# CONFIG_HID_PLANTRONICS is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_GENERIC=m
@@ -344,6 +388,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_ISO9660_FS=y
 CONFIG_JOLIET=y
 CONFIG_ZISOFS=y
@@ -359,6 +404,7 @@ CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_MINIX_FS=m
 CONFIG_OMFS_FS=m
@@ -428,10 +474,18 @@ CONFIG_DLM=m
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_ASYNC_RAID6_TEST=m
 CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_LKM=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_UDELAY=m
+CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -466,13 +520,10 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
 CONFIG_XZ_DEC_TEST=m
index 9b6c691..1517ed1 100644 (file)
@@ -6,6 +6,7 @@ generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += futex.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
index 953e0ac..6321c44 100644 (file)
 /* convert irq_handler index to vector number */
 #define IRQ_SOURCE_TO_VECTOR(i)        ((i) + ((i) < 8 ? 0x18 : (0x40-8)))
 
-/* interrupt service types */
-#define IRQ_TYPE_SLOW     0
-#define IRQ_TYPE_FAST     1
-#define IRQ_TYPE_PRIO     2
-
 /* ST-MFP interrupts */
 #define IRQ_MFP_BUSY      (8)
 #define IRQ_MFP_DCD       (9)
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644 (file)
index bc868af..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef _ASM_M68K_FUTEX_H
-#define _ASM_M68K_FUTEX_H
-
-#ifdef __KERNEL__
-#if !defined(CONFIG_MMU)
-#include <asm-generic/futex.h>
-#else  /* CONFIG_MMU */
-
-#include <linux/futex.h>
-#include <linux/uaccess.h>
-#include <asm/errno.h>
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-                             u32 oldval, u32 newval)
-{
-       u32 val;
-
-       if (unlikely(get_user(val, uaddr) != 0))
-               return -EFAULT;
-
-       if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
-               return -EFAULT;
-
-       *uval = val;
-
-       return 0;
-}
-
-static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
-{
-       int op = (encoded_op >> 28) & 7;
-       int cmp = (encoded_op >> 24) & 15;
-       int oparg = (encoded_op << 8) >> 20;
-       int cmparg = (encoded_op << 20) >> 20;
-       int oldval, ret;
-       u32 tmp;
-
-       if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-               oparg = 1 << oparg;
-
-       pagefault_disable();    /* implies preempt_disable() */
-
-       ret = -EFAULT;
-       if (unlikely(get_user(oldval, uaddr) != 0))
-               goto out_pagefault_enable;
-
-       ret = 0;
-       tmp = oldval;
-
-       switch (op) {
-       case FUTEX_OP_SET:
-               tmp = oparg;
-               break;
-       case FUTEX_OP_ADD:
-               tmp += oparg;
-               break;
-       case FUTEX_OP_OR:
-               tmp |= oparg;
-               break;
-       case FUTEX_OP_ANDN:
-               tmp &= ~oparg;
-               break;
-       case FUTEX_OP_XOR:
-               tmp ^= oparg;
-               break;
-       default:
-               ret = -ENOSYS;
-       }
-
-       if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
-               ret = -EFAULT;
-
-out_pagefault_enable:
-       pagefault_enable();     /* subsumes preempt_enable() */
-
-       if (ret == 0) {
-               switch (cmp) {
-               case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-               case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-               case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-               case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-               case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-               case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-               default: ret = -ENOSYS;
-               }
-       }
-       return ret;
-}
-
-#endif /* CONFIG_MMU */
-#endif /* __KERNEL__ */
-#endif /* _ASM_M68K_FUTEX_H */
index 29c7c6c..42235e7 100644 (file)
@@ -55,7 +55,7 @@ struct mac_model
 #define MAC_SCSI_QUADRA3       4
 #define MAC_SCSI_IIFX          5
 #define MAC_SCSI_DUO           6
-#define MAC_SCSI_CCL           7
+#define MAC_SCSI_LC            7
 #define MAC_SCSI_LATE          8
 
 #define MAC_IDE_NONE           0
index e9c3756..689b47d 100644 (file)
@@ -296,7 +296,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "IIvi",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -305,7 +305,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "IIvx",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -320,7 +320,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Classic II",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -329,7 +329,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Color Classic",
                .adb_type       = MAC_ADB_CUDA,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_CCL,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -338,7 +338,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Color Classic II",
                .adb_type       = MAC_ADB_CUDA,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_CCL,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -353,7 +353,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "LC",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -362,7 +362,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "LC II",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -371,7 +371,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "LC III",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -499,7 +499,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Performa 460",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -526,7 +526,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Performa 520",
                .adb_type       = MAC_ADB_CUDA,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_CCL,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -535,7 +535,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Performa 550",
                .adb_type       = MAC_ADB_CUDA,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_CCL,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -567,7 +567,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "TV",
                .adb_type       = MAC_ADB_CUDA,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_CCL,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -576,7 +576,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "Performa 600",
                .adb_type       = MAC_ADB_IISI,
                .via_type       = MAC_VIA_IICI,
-               .scsi_type      = MAC_SCSI_OLD,
+               .scsi_type      = MAC_SCSI_LC,
                .scc_type       = MAC_SCC_II,
                .nubus_type     = MAC_NUBUS,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -1109,8 +1109,10 @@ int __init mac_platform_init(void)
                platform_device_register_simple("mac_scsi", 0,
                        mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
                break;
-       case MAC_SCSI_CCL:
-               /* Addresses from the Color Classic Developer Note.
+       case MAC_SCSI_LC:
+               /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
+                * Also from the Developer Notes for Classic II, LC III,
+                * Color Classic and IIvx.
                 * $50F0 6000 - $50F0 7FFF: SCSI handshake
                 * $50F1 0000 - $50F1 1FFF: SCSI
                 * $50F1 2000 - $50F1 3FFF: SCSI DMA
index 1bb3ce6..e6a3b56 100644 (file)
@@ -168,49 +168,3 @@ int mvme147_set_clock_mmss (unsigned long nowtime)
 {
        return 0;
 }
-
-/*-------------------  Serial console stuff ------------------------*/
-
-static void scc_delay (void)
-{
-       int n;
-       volatile int trash;
-
-       for (n = 0; n < 20; n++)
-               trash = n;
-}
-
-static void scc_write (char ch)
-{
-       volatile char *p = (volatile char *)M147_SCC_A_ADDR;
-
-       do {
-               scc_delay();
-       }
-       while (!(*p & 4));
-       scc_delay();
-       *p = 8;
-       scc_delay();
-       *p = ch;
-}
-
-
-void m147_scc_write (struct console *co, const char *str, unsigned count)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       while (count--)
-       {
-               if (*str == '\n')
-                       scc_write ('\r');
-               scc_write (*str++);
-       }
-       local_irq_restore(flags);
-}
-
-void mvme147_init_console_port (struct console *co, int cflag)
-{
-       co->write    = m147_scc_write;
-}
index 6ef7a81..1755e2f 100644 (file)
@@ -161,4 +161,4 @@ static int __init rtc_MK48T08_init(void)
        printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
        return misc_register(&rtc_dev);
 }
-module_init(rtc_MK48T08_init);
+device_initcall(rtc_MK48T08_init);
index 8e211cc..91d2068 100644 (file)
@@ -34,5 +34,4 @@ $(obj)/simpleImage.%: vmlinux FORCE
        $(call if_changed,strip)
        @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
-
-clean-files += simpleImage.*.unstrip linux.bin.ub
+clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
index c4982d1..a3d2e42 100644 (file)
@@ -16,5 +16,3 @@ quiet_cmd_cp = CP      $< $@$2
 
 # Rule to build device tree blobs
 DTC_FLAGS := -p 1024
-
-clean-files += *.dtb
index 60cb39d..ea2a9cd 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <linux/param.h>
 
-extern inline void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
        asm volatile ("# __delay                \n\t"           \
                        "1: addi        %0, %0, -1\t\n"         \
@@ -43,7 +43,7 @@ extern inline void __delay(unsigned long loops)
 
 extern unsigned long loops_per_jiffy;
 
-extern inline void __udelay(unsigned int x)
+static inline void __udelay(unsigned int x)
 {
 
        unsigned long long tmp =
index 78b17d4..ad27acb 100644 (file)
@@ -23,6 +23,9 @@ static inline void arch_kgdb_breakpoint(void)
        __asm__ __volatile__("brki r16, 0x18;");
 }
 
+struct pt_regs;
+asmlinkage void microblaze_kgdb_break(struct pt_regs *regs);
+
 #endif /* __ASSEMBLY__ */
 #endif /* __MICROBLAZE_KGDB_H__ */
 #endif /* __KERNEL__ */
index 3a8e36d..0540bba 100644 (file)
@@ -1,15 +1 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_LINKAGE_H
-#define _ASM_MICROBLAZE_LINKAGE_H
-
-#define __ALIGN                .align 4
-#define __ALIGN_STR    ".align 4"
-
-#endif /* _ASM_MICROBLAZE_LINKAGE_H */
+#include <asm-generic/linkage.h>
index 7fdf7fa..61436d6 100644 (file)
@@ -60,7 +60,7 @@ extern unsigned long get_zero_page_fast(void);
 
 extern void __bad_pte(pmd_t *pmd);
 
-extern inline pgd_t *get_pgd_slow(void)
+static inline pgd_t *get_pgd_slow(void)
 {
        pgd_t *ret;
 
@@ -70,7 +70,7 @@ extern inline pgd_t *get_pgd_slow(void)
        return ret;
 }
 
-extern inline pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
        unsigned long *ret;
 
@@ -84,14 +84,14 @@ extern inline pgd_t *get_pgd_fast(void)
        return (pgd_t *)ret;
 }
 
-extern inline void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
        *(unsigned long **)pgd = pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
 }
 
-extern inline void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
        free_page((unsigned long)pgd);
 }
@@ -146,19 +146,19 @@ static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm,
        return (pte_t *)ret;
 }
 
-extern inline void pte_free_fast(pte_t *pte)
+static inline void pte_free_fast(pte_t *pte)
 {
        *(unsigned long **)pte = pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
 }
 
-extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
        free_page((unsigned long)pte);
 }
 
-extern inline void pte_free_slow(struct page *ptepage)
+static inline void pte_free_slow(struct page *ptepage)
 {
        __free_page(ptepage);
 }
index 53cfaf3..04a5bec 100644 (file)
@@ -97,7 +97,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
                microblaze_set_syscall_arg(regs, i++, *args++);
 }
 
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
 
 static inline int syscall_get_arch(void)
index 59a89a6..62942fd 100644 (file)
@@ -220,7 +220,7 @@ extern long __user_bad(void);
        } else {                                                        \
                __gu_err = -EFAULT;                                     \
        }                                                               \
-       x = (typeof(*(ptr)))__gu_val;                                   \
+       x = (__force typeof(*(ptr)))__gu_val;                           \
        __gu_err;                                                       \
 })
 
@@ -242,7 +242,7 @@ extern long __user_bad(void);
        default:                                                        \
                /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
        }                                                               \
-       x = (__typeof__(*(ptr))) __gu_val;                              \
+       x = (__force __typeof__(*(ptr))) __gu_val;                      \
        __gu_err;                                                       \
 })
 
@@ -306,7 +306,7 @@ extern long __user_bad(void);
 
 #define __put_user_check(x, ptr, size)                                 \
 ({                                                                     \
-       typeof(*(ptr)) volatile __pu_val = x;                                   \
+       typeof(*(ptr)) volatile __pu_val = x;                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);                       \
        int __pu_err = 0;                                               \
                                                                        \
index 0a53362..76ed17b 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         388
+#define __NR_syscalls         389
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index c712677..32850c7 100644 (file)
 #define __NR_getrandom         385
 #define __NR_memfd_create      386
 #define __NR_bpf               387
+#define __NR_execveat          388
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 08d50cc..f08baca 100644 (file)
@@ -16,7 +16,7 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o intc.o irq.o \
-       platform.o process.o prom.o prom_parse.o ptrace.o \
+       platform.o process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
index a6e4441..0bde47e 100644 (file)
@@ -140,10 +140,10 @@ do {                                                                      \
 /* It is used only first parameter for OP - for wic, wdc */
 #define CACHE_RANGE_LOOP_1(start, end, line_length, op)                        \
 do {                                                                   \
-       int volatile temp = 0;                                          \
-       int align = ~(line_length - 1);                                 \
+       unsigned int volatile temp = 0;                                         \
+       unsigned int align = ~(line_length - 1);                                        \
        end = ((end & align) == end) ? end - line_length : end & align; \
-       WARN_ON(end - start < 0);                                       \
+       WARN_ON(end < start);                                   \
                                                                        \
        __asm__ __volatile__ (" 1:      " #op " %1, r0;"                \
                                        "cmpu   %0, %1, %2;"            \
index 93c26cf..a32daec 100644 (file)
@@ -33,7 +33,7 @@
 void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
 {
        struct pvr_s pvr;
-       int temp; /* for saving temp value */
+       u32 temp; /* for saving temp value */
        get_pvr(&pvr);
 
        CI(ver_code, VERSION);
index 4854285..85dbda4 100644 (file)
@@ -22,7 +22,7 @@ static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
 
 void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
 {
-       int i = 0;
+       u32 i = 0;
 
        ci->use_instr =
                (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
index 234acad..d1dd6e8 100644 (file)
@@ -41,8 +41,12 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"8.40.a", 0x18},
        {"8.40.b", 0x19},
        {"8.50.a", 0x1a},
+       {"8.50.b", 0x1c},
+       {"8.50.c", 0x1e},
        {"9.0", 0x1b},
        {"9.1", 0x1d},
+       {"9.2", 0x1f},
+       {"9.3", 0x20},
        {NULL, 0},
 };
 
@@ -61,11 +65,14 @@ const struct family_string_key family_string_lookup[] = {
        {"spartan3adsp", 0xc},
        {"spartan6", 0xd},
        {"virtex6", 0xe},
+       {"virtex7", 0xf},
        /* FIXME There is no key code defined for spartan2 */
        {"spartan2", 0xf0},
        {"kintex7", 0x10},
        {"artix7", 0x11},
        {"zynq7000", 0x12},
+       {"UltraScale Virtex", 0x13},
+       {"UltraScale Kintex", 0x14},
        {NULL, 0},
 };
 
index 15c7c12..719feee 100644 (file)
@@ -148,17 +148,17 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
        ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
        if (ret < 0) {
                pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
-               return -EINVAL;
+               return ret;
        }
 
        ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
        if (ret < 0) {
                pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
-               return -EINVAL;
+               return ret;
        }
 
-       if (intr_mask > (u32)((1ULL << nr_irq) - 1))
-               pr_info(" ERROR: Mismatch in kind-of-intr param\n");
+       if (intr_mask >> nr_irq)
+               pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
 
        pr_info("%s: num_irq=%d, edge=0x%x\n",
                intc->full_name, nr_irq, intr_mask);
index 09a5e82..8736af5 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <asm/cacheflush.h>
 #include <asm/asm-offsets.h>
+#include <asm/kgdb.h>
 #include <asm/pvr.h>
 
 #define GDB_REG                0
@@ -35,9 +36,10 @@ struct pvr_s pvr;
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
-       int i;
+       unsigned int i;
        unsigned long *pt_regb = (unsigned long *)regs;
        int temp;
+
        /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
        for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
                gdb_regs[i] = pt_regb[i];
@@ -67,7 +69,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 
 void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
-       int i;
+       unsigned int i;
        unsigned long *pt_regb = (unsigned long *)regs;
 
        /* pt_regs and gdb_regs have the same 37 values.
@@ -77,7 +79,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
                pt_regb[i] = gdb_regs[i];
 }
 
-void microblaze_kgdb_break(struct pt_regs *regs)
+asmlinkage void microblaze_kgdb_break(struct pt_regs *regs)
 {
        if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
                return;
@@ -91,7 +93,7 @@ void microblaze_kgdb_break(struct pt_regs *regs)
 /* untested */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
-       int i;
+       unsigned int i;
        unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
 
        /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
diff --git a/arch/microblaze/kernel/prom_parse.c b/arch/microblaze/kernel/prom_parse.c
deleted file mode 100644 (file)
index 068762f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#undef DEBUG
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/etherdevice.h>
-#include <linux/of_address.h>
-#include <asm/prom.h>
-
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-               unsigned long *busno, unsigned long *phys, unsigned long *size)
-{
-       const u32 *dma_window;
-       u32 cells;
-       const unsigned char *prop;
-
-       dma_window = dma_window_prop;
-
-       /* busno is always one cell */
-       *busno = *(dma_window++);
-
-       prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
-       if (!prop)
-               prop = of_get_property(dn, "#address-cells", NULL);
-
-       cells = prop ? *(u32 *)prop : of_n_addr_cells(dn);
-       *phys = of_read_number(dma_window, cells);
-
-       dma_window += cells;
-
-       prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
-       cells = prop ? *(u32 *)prop : of_n_size_cells(dn);
-       *size = of_read_number(dma_window, cells);
-}
index bb10637..8cfa98c 100644 (file)
@@ -132,9 +132,9 @@ long arch_ptrace(struct task_struct *child, long request,
        return rval;
 }
 
-asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
+asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs)
 {
-       long ret = 0;
+       unsigned long ret = 0;
 
        secure_computing_strict(regs->r12);
 
index fbe58c6..bab4c83 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/init.h>
 #include <linux/of_platform.h>
-#include <asm/prom.h>
 
 /* Trigger specific functions */
 #ifdef CONFIG_GPIOLIB
index 8955a38..2357060 100644 (file)
@@ -158,7 +158,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 {
        struct rt_sigframe __user *frame;
        int err = 0, sig = ksig->sig;
-       int signal;
+       unsigned long signal;
        unsigned long address = 0;
 #ifdef CONFIG_MMU
        pmd_t *pmdp;
@@ -174,7 +174,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                && current_thread_info()->exec_domain->signal_invmap
                && sig < 32
                ? current_thread_info()->exec_domain->signal_invmap[sig]
-               : sig;
+               : (unsigned long)sig;
 
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                err |= copy_siginfo_to_user(&frame->info, &ksig->info);
index 0166e89..29c8568 100644 (file)
@@ -388,3 +388,4 @@ ENTRY(sys_call_table)
        .long sys_getrandom             /* 385 */
        .long sys_memfd_create
        .long sys_bpf
+       .long sys_execveat
index 1f7b8d4..61c04ee 100644 (file)
@@ -59,7 +59,7 @@ struct stack_trace;
  *
  * Return - Number of stack bytes the instruction reserves or reclaims
  */
-inline long get_frame_size(unsigned long instr)
+static inline long get_frame_size(unsigned long instr)
 {
        return abs((s16)(instr & 0xFFFF));
 }
index 5ec2a7b..f2355e3 100644 (file)
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm1480_pci_ops = {
-       bcm1480_pcibios_read,
-       bcm1480_pcibios_write,
+       .read = bcm1480_pcibios_read,
+       .write = bcm1480_pcibios_write,
 };
 
 static struct resource bcm1480_mem_resource = {
index d07e041..bedb72b 100644 (file)
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
 
 
 static struct pci_ops octeon_pci_ops = {
-       octeon_read_config,
-       octeon_write_config,
+       .read = octeon_read_config,
+       .write = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
index 5e36c33..eb4a17b 100644 (file)
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-       octeon_pcie0_read_config,
-       octeon_pcie0_write_config,
+       .read = octeon_pcie0_read_config,
+       .write = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-       octeon_pcie1_read_config,
-       octeon_pcie1_write_config,
+       .read = octeon_pcie1_read_config,
+       .write = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
 };
 
 static struct pci_ops octeon_dummy_ops = {
-       octeon_dummy_read_config,
-       octeon_dummy_write_config,
+       .read = octeon_dummy_read_config,
+       .write = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
index 471ff39..613ca1e 100644 (file)
@@ -228,8 +228,8 @@ static int pci_ampci_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops pci_direct_ampci = {
-       pci_ampci_read_config,
-       pci_ampci_write_config,
+       .read = pci_ampci_read_config,
+       .write = pci_ampci_write_config,
 };
 
 /*
index f223875..94170e4 100644 (file)
@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops scc_pciex_pci_ops = {
-       scc_pciex_read_config,
-       scc_pciex_write_config,
+       .read = scc_pciex_read_config,
+       .write = scc_pciex_write_config,
 };
 
 static void pciex_clear_intr_all(unsigned int __iomem *base)
index 04702db..f4071a6 100644 (file)
@@ -133,17 +133,23 @@ static void __init fixup_bus_range(struct device_node *bridge)
        |(((unsigned int)(off)) & 0xFCUL) \
        |1UL)
 
-static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
-                                              u8 bus, u8 dev_fn, u8 offset)
+static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus,
+                                        unsigned int dev_fn,
+                                        int offset)
 {
        unsigned int caddr;
+       struct pci_controller *hose;
 
-       if (bus == hose->first_busno) {
+       hose = pci_bus_to_host(bus);
+       if (hose == NULL)
+               return NULL;
+
+       if (bus->number == hose->first_busno) {
                if (dev_fn < (11 << 3))
                        return NULL;
                caddr = MACRISC_CFA0(dev_fn, offset);
        } else
-               caddr = MACRISC_CFA1(bus, dev_fn, offset);
+               caddr = MACRISC_CFA1(bus->number, dev_fn, offset);
 
        /* Uninorth will return garbage if we don't read back the value ! */
        do {
@@ -154,129 +160,46 @@ static volatile void __iomem *macrisc_cfg_access(struct pci_controller* hose,
        return hose->cfg_data + offset;
 }
 
-static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
-                                     int offset, int len, u32 *val)
-{
-       struct pci_controller *hose;
-       volatile void __iomem *addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       if (offset >= 0x100)
-               return  PCIBIOS_BAD_REGISTER_NUMBER;
-       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               *val = in_8(addr);
-               break;
-       case 2:
-               *val = in_le16(addr);
-               break;
-       default:
-               *val = in_le32(addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
-                                      int offset, int len, u32 val)
-{
-       struct pci_controller *hose;
-       volatile void __iomem *addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       if (offset >= 0x100)
-               return  PCIBIOS_BAD_REGISTER_NUMBER;
-       addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               out_8(addr, val);
-               break;
-       case 2:
-               out_le16(addr, val);
-               break;
-       default:
-               out_le32(addr, val);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
 static struct pci_ops macrisc_pci_ops =
 {
-       .read = macrisc_read_config,
-       .write = macrisc_write_config,
+       .map_bus = macrisc_cfg_map_bus,
+       .read = pci_generic_config_read,
+       .write = pci_generic_config_write,
 };
 
 #ifdef CONFIG_PPC32
 /*
  * Verify that a specific (bus, dev_fn) exists on chaos
  */
-static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
+static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn,
+                                  int offset)
 {
        struct device_node *np;
        const u32 *vendor, *device;
 
        if (offset >= 0x100)
-               return  PCIBIOS_BAD_REGISTER_NUMBER;
+               return NULL;
        np = of_pci_find_child_device(bus->dev.of_node, devfn);
        if (np == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
+               return NULL;
 
        vendor = of_get_property(np, "vendor-id", NULL);
        device = of_get_property(np, "device-id", NULL);
        if (vendor == NULL || device == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
+               return NULL;
 
        if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
            && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       return PCIBIOS_SUCCESSFUL;
-}
+               return NULL;
 
-static int
-chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
-                 int len, u32 *val)
-{
-       int result = chaos_validate_dev(bus, devfn, offset);
-       if (result == PCIBIOS_BAD_REGISTER_NUMBER)
-               *val = ~0U;
-       if (result != PCIBIOS_SUCCESSFUL)
-               return result;
-       return macrisc_read_config(bus, devfn, offset, len, val);
-}
-
-static int
-chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
-                  int len, u32 val)
-{
-       int result = chaos_validate_dev(bus, devfn, offset);
-       if (result != PCIBIOS_SUCCESSFUL)
-               return result;
-       return macrisc_write_config(bus, devfn, offset, len, val);
+       return macrisc_cfg_map_bus(bus, devfn, offset);
 }
 
 static struct pci_ops chaos_pci_ops =
 {
-       .read = chaos_read_config,
-       .write = chaos_write_config,
+       .map_bus = chaos_map_bus,
+       .read = pci_generic_config_read,
+       .write = pci_generic_config_write,
 };
 
 static void __init setup_chaos(struct pci_controller *hose,
@@ -471,15 +394,24 @@ static struct pci_ops u3_ht_pci_ops =
         |(((unsigned int)(off)) & 0xfcU)       \
         |1UL)
 
-static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
-                                       u8 bus, u8 dev_fn, int offset)
+static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus,
+                                        unsigned int dev_fn,
+                                        int offset)
 {
+       struct pci_controller *hose;
        unsigned int caddr;
 
-       if (bus == hose->first_busno) {
+       if (offset >= 0x1000)
+               return NULL;
+
+       hose = pci_bus_to_host(bus);
+       if (!hose)
+               return NULL;
+
+       if (bus->number == hose->first_busno) {
                caddr = U4_PCIE_CFA0(dev_fn, offset);
        } else
-               caddr = U4_PCIE_CFA1(bus, dev_fn, offset);
+               caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset);
 
        /* Uninorth will return garbage if we don't read back the value ! */
        do {
@@ -490,74 +422,11 @@ static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
        return hose->cfg_data + offset;
 }
 
-static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
-                              int offset, int len, u32 *val)
-{
-       struct pci_controller *hose;
-       volatile void __iomem *addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       if (offset >= 0x1000)
-               return  PCIBIOS_BAD_REGISTER_NUMBER;
-       addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               *val = in_8(addr);
-               break;
-       case 2:
-               *val = in_le16(addr);
-               break;
-       default:
-               *val = in_le32(addr);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
-                               int offset, int len, u32 val)
-{
-       struct pci_controller *hose;
-       volatile void __iomem *addr;
-
-       hose = pci_bus_to_host(bus);
-       if (hose == NULL)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       if (offset >= 0x1000)
-               return  PCIBIOS_BAD_REGISTER_NUMBER;
-       addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       /*
-        * Note: the caller has already checked that offset is
-        * suitably aligned and that len is 1, 2 or 4.
-        */
-       switch (len) {
-       case 1:
-               out_8(addr, val);
-               break;
-       case 2:
-               out_le16(addr, val);
-               break;
-       default:
-               out_le32(addr, val);
-               break;
-       }
-       return PCIBIOS_SUCCESSFUL;
-}
-
 static struct pci_ops u4_pcie_pci_ops =
 {
-       .read = u4_pcie_read_config,
-       .write = u4_pcie_write_config,
+       .map_bus = u4_pcie_cfg_map_bus,
+       .read = pci_generic_config_read,
+       .write = pci_generic_config_write,
 };
 
 static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev)
index 6455c1e..271b67e 100644 (file)
@@ -645,61 +645,21 @@ mapped:
        return pcie->cfg_type1 + offset;
 }
 
-static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
-                                   int offset, int len, u32 *val)
-{
-       void __iomem *cfg_addr;
-
-       cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
-       if (!cfg_addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (len) {
-       case 1:
-               *val = in_8(cfg_addr);
-               break;
-       case 2:
-               *val = in_le16(cfg_addr);
-               break;
-       default:
-               *val = in_le32(cfg_addr);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
 static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
                                     int offset, int len, u32 val)
 {
        struct pci_controller *hose = pci_bus_to_host(bus);
-       void __iomem *cfg_addr;
-
-       cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset);
-       if (!cfg_addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
 
        /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
        if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
                val &= 0xffffff00;
 
-       switch (len) {
-       case 1:
-               out_8(cfg_addr, val);
-               break;
-       case 2:
-               out_le16(cfg_addr, val);
-               break;
-       default:
-               out_le32(cfg_addr, val);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
+       return pci_generic_config_write(bus, devfn, offset, len, val);
 }
 
 static struct pci_ops mpc83xx_pcie_ops = {
-       .read = mpc83xx_pcie_read_config,
+       .map_bus = mpc83xx_pcie_remap_cfg,
+       .read = pci_generic_config_read,
        .write = mpc83xx_pcie_write_config,
 };
 
index f70c789..325df47 100644 (file)
@@ -245,7 +245,7 @@ static void fixup_read_and_payload_sizes(void)
 {
        struct pci_dev *dev = NULL;
        int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
-       int max_read_size = 0x2; /* Limit to 512 byte reads. */
+       int max_read_size = PCI_EXP_DEVCTL_READRQ_512B;
        u16 new_values;
 
        /* Scan for the smallest maximum payload size. */
@@ -258,7 +258,7 @@ static void fixup_read_and_payload_sizes(void)
        }
 
        /* Now, set the max_payload_size for all devices to that value. */
-       new_values = (max_read_size << 12) | (smallest_max_payload << 5);
+       new_values = max_read_size | (smallest_max_payload << 5);
        for_each_pci_dev(dev)
                pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
                                PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ,
index 5e28e2b..019f4e5 100644 (file)
@@ -497,6 +497,17 @@ config X86_INTEL_LPSS
          things like clock tree (common clock framework) and pincontrol
          which are needed by the LPSS peripheral drivers.
 
+config X86_AMD_PLATFORM_DEVICE
+       bool "AMD ACPI2Platform devices support"
+       depends on ACPI
+       select COMMON_CLK
+       select PINCTRL
+       ---help---
+         Select to interpret AMD specific ACPI device to platform device
+         such as I2C, UART, GPIO found on AMD Carrizo and later chipsets.
+         I2C and UART depend on COMMON_CLK to set clock. GPIO driver is
+         implemented under PINCTRL subsystem.
+
 config IOSF_MBI
        tristate "Intel SoC IOSF Sideband support for SoC platforms"
        depends on PCI
index 164e3f8..fa1195d 100644 (file)
@@ -93,8 +93,6 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
-extern bool mp_should_keep_irq(struct device *dev);
-
 struct pci_raw_ops {
        int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 *val);
index 5eea099..358dcd3 100644 (file)
@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                                   struct gnttab_map_grant_ref *kmap_ops,
                                   struct page **pages, unsigned int count);
 extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-                                    struct gnttab_map_grant_ref *kmap_ops,
+                                    struct gnttab_unmap_grant_ref *kunmap_ops,
                                     struct page **pages, unsigned int count);
-extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
 /*
  * Helper functions to write or read unsigned long values to/from
@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
                return mfn;
 
        pfn = mfn_to_pfn_no_overrides(mfn);
-       if (__pfn_to_mfn(pfn) != mfn) {
-               /*
-                * If this appears to be a foreign mfn (because the pfn
-                * doesn't map back to the mfn), then check the local override
-                * table to see if there's a better pfn to use.
-                *
-                * m2p_find_override_pfn returns ~0 if it doesn't find anything.
-                */
-               pfn = m2p_find_override_pfn(mfn, ~0);
-       }
+       if (__pfn_to_mfn(pfn) != mfn)
+               pfn = ~0;
 
        /*
-        * pfn is ~0 if there are no entries in the m2p for mfn or if the
-        * entry doesn't map back to the mfn and m2p_override doesn't have a
-        * valid entry for it.
+        * pfn is ~0 if there are no entries in the m2p for mfn or the
+        * entry doesn't map back to the mfn.
         */
        if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
                pfn = mfn;
index d979e5a..536240f 100644 (file)
 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
 
+#define MSR_CORE_PERF_LIMIT_REASONS    0x00000690
+#define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
+#define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
+
 /* Hardware P state interface */
 #define MSR_PPERF                      0x0000064e
 #define MSR_PERF_LIMIT_REASONS         0x0000064f
 
 #define MSR_IA32_PERF_STATUS           0x00000198
 #define MSR_IA32_PERF_CTL              0x00000199
+#define INTEL_PERF_CTL_MASK            0xffff
 #define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
 #define MSR_AMD_PERF_STATUS            0xc0010063
 #define MSR_AMD_PERF_CTL               0xc0010062
index a18fff3..ae97ed0 100644 (file)
@@ -845,13 +845,7 @@ int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
 
 static int __init acpi_parse_sbf(struct acpi_table_header *table)
 {
-       struct acpi_table_boot *sb;
-
-       sb = (struct acpi_table_boot *)table;
-       if (!sb) {
-               printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-               return -ENODEV;
-       }
+       struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
 
        sbf_port = sb->cmos_index;      /* Save CMOS port */
 
@@ -865,13 +859,7 @@ static struct resource *hpet_res __initdata;
 
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
-       struct acpi_table_hpet *hpet_tbl;
-
-       hpet_tbl = (struct acpi_table_hpet *)table;
-       if (!hpet_tbl) {
-               printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-               return -ENODEV;
-       }
+       struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
 
        if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
                printk(KERN_WARNING PREFIX "HPET timers must be located in "
index cfd1b13..6ac2738 100644 (file)
@@ -10,9 +10,6 @@
 struct pci_root_info {
        struct acpi_device *bridge;
        char name[16];
-       unsigned int res_num;
-       struct resource *res;
-       resource_size_t *res_offset;
        struct pci_sysdata sd;
 #ifdef CONFIG_PCI_MMCONFIG
        bool mcfg_added;
@@ -218,130 +215,41 @@ static void teardown_mcfg_map(struct pci_root_info *info)
 }
 #endif
 
-static acpi_status resource_to_addr(struct acpi_resource *resource,
-                                   struct acpi_resource_address64 *addr)
-{
-       acpi_status status;
-       struct acpi_resource_memory24 *memory24;
-       struct acpi_resource_memory32 *memory32;
-       struct acpi_resource_fixed_memory32 *fixed_memory32;
-
-       memset(addr, 0, sizeof(*addr));
-       switch (resource->type) {
-       case ACPI_RESOURCE_TYPE_MEMORY24:
-               memory24 = &resource->data.memory24;
-               addr->resource_type = ACPI_MEMORY_RANGE;
-               addr->minimum = memory24->minimum;
-               addr->address_length = memory24->address_length;
-               addr->maximum = addr->minimum + addr->address_length - 1;
-               return AE_OK;
-       case ACPI_RESOURCE_TYPE_MEMORY32:
-               memory32 = &resource->data.memory32;
-               addr->resource_type = ACPI_MEMORY_RANGE;
-               addr->minimum = memory32->minimum;
-               addr->address_length = memory32->address_length;
-               addr->maximum = addr->minimum + addr->address_length - 1;
-               return AE_OK;
-       case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
-               fixed_memory32 = &resource->data.fixed_memory32;
-               addr->resource_type = ACPI_MEMORY_RANGE;
-               addr->minimum = fixed_memory32->address;
-               addr->address_length = fixed_memory32->address_length;
-               addr->maximum = addr->minimum + addr->address_length - 1;
-               return AE_OK;
-       case ACPI_RESOURCE_TYPE_ADDRESS16:
-       case ACPI_RESOURCE_TYPE_ADDRESS32:
-       case ACPI_RESOURCE_TYPE_ADDRESS64:
-               status = acpi_resource_to_address64(resource, addr);
-               if (ACPI_SUCCESS(status) &&
-                   (addr->resource_type == ACPI_MEMORY_RANGE ||
-                   addr->resource_type == ACPI_IO_RANGE) &&
-                   addr->address_length > 0) {
-                       return AE_OK;
-               }
-               break;
-       }
-       return AE_ERROR;
-}
-
-static acpi_status count_resource(struct acpi_resource *acpi_res, void *data)
+static void validate_resources(struct device *dev, struct list_head *crs_res,
+                              unsigned long type)
 {
-       struct pci_root_info *info = data;
-       struct acpi_resource_address64 addr;
-       acpi_status status;
-
-       status = resource_to_addr(acpi_res, &addr);
-       if (ACPI_SUCCESS(status))
-               info->res_num++;
-       return AE_OK;
-}
-
-static acpi_status setup_resource(struct acpi_resource *acpi_res, void *data)
-{
-       struct pci_root_info *info = data;
-       struct resource *res;
-       struct acpi_resource_address64 addr;
-       acpi_status status;
-       unsigned long flags;
-       u64 start, orig_end, end;
-
-       status = resource_to_addr(acpi_res, &addr);
-       if (!ACPI_SUCCESS(status))
-               return AE_OK;
-
-       if (addr.resource_type == ACPI_MEMORY_RANGE) {
-               flags = IORESOURCE_MEM;
-               if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
-                       flags |= IORESOURCE_PREFETCH;
-       } else if (addr.resource_type == ACPI_IO_RANGE) {
-               flags = IORESOURCE_IO;
-       } else
-               return AE_OK;
-
-       start = addr.minimum + addr.translation_offset;
-       orig_end = end = addr.maximum + addr.translation_offset;
-
-       /* Exclude non-addressable range or non-addressable portion of range */
-       end = min(end, (u64)iomem_resource.end);
-       if (end <= start) {
-               dev_info(&info->bridge->dev,
-                       "host bridge window [%#llx-%#llx] "
-                       "(ignored, not CPU addressable)\n", start, orig_end);
-               return AE_OK;
-       } else if (orig_end != end) {
-               dev_info(&info->bridge->dev,
-                       "host bridge window [%#llx-%#llx] "
-                       "([%#llx-%#llx] ignored, not CPU addressable)\n", 
-                       start, orig_end, end + 1, orig_end);
-       }
+       LIST_HEAD(list);
+       struct resource *res1, *res2, *root = NULL;
+       struct resource_entry *tmp, *entry, *entry2;
 
-       res = &info->res[info->res_num];
-       res->name = info->name;
-       res->flags = flags;
-       res->start = start;
-       res->end = end;
-       info->res_offset[info->res_num] = addr.translation_offset;
-       info->res_num++;
+       BUG_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0);
+       root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource;
 
-       if (!pci_use_crs)
-               dev_printk(KERN_DEBUG, &info->bridge->dev,
-                          "host bridge window %pR (ignored)\n", res);
+       list_splice_init(crs_res, &list);
+       resource_list_for_each_entry_safe(entry, tmp, &list) {
+               bool free = false;
+               resource_size_t end;
 
-       return AE_OK;
-}
-
-static void coalesce_windows(struct pci_root_info *info, unsigned long type)
-{
-       int i, j;
-       struct resource *res1, *res2;
-
-       for (i = 0; i < info->res_num; i++) {
-               res1 = &info->res[i];
+               res1 = entry->res;
                if (!(res1->flags & type))
-                       continue;
+                       goto next;
+
+               /* Exclude non-addressable range or non-addressable portion */
+               end = min(res1->end, root->end);
+               if (end <= res1->start) {
+                       dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n",
+                                res1);
+                       free = true;
+                       goto next;
+               } else if (res1->end != end) {
+                       dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n",
+                                res1, (unsigned long long)end + 1,
+                                (unsigned long long)res1->end);
+                       res1->end = end;
+               }
 
-               for (j = i + 1; j < info->res_num; j++) {
-                       res2 = &info->res[j];
+               resource_list_for_each_entry(entry2, crs_res) {
+                       res2 = entry2->res;
                        if (!(res2->flags & type))
                                continue;
 
@@ -353,118 +261,92 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type)
                        if (resource_overlaps(res1, res2)) {
                                res2->start = min(res1->start, res2->start);
                                res2->end = max(res1->end, res2->end);
-                               dev_info(&info->bridge->dev,
-                                        "host bridge window expanded to %pR; %pR ignored\n",
+                               dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n",
                                         res2, res1);
-                               res1->flags = 0;
+                               free = true;
+                               goto next;
                        }
                }
+
+next:
+               resource_list_del(entry);
+               if (free)
+                       resource_list_free_entry(entry);
+               else
+                       resource_list_add_tail(entry, crs_res);
        }
 }
 
 static void add_resources(struct pci_root_info *info,
-                         struct list_head *resources)
+                         struct list_head *resources,
+                         struct list_head *crs_res)
 {
-       int i;
-       struct resource *res, *root, *conflict;
-
-       coalesce_windows(info, IORESOURCE_MEM);
-       coalesce_windows(info, IORESOURCE_IO);
+       struct resource_entry *entry, *tmp;
+       struct resource *res, *conflict, *root = NULL;
 
-       for (i = 0; i < info->res_num; i++) {
-               res = &info->res[i];
+       validate_resources(&info->bridge->dev, crs_res, IORESOURCE_MEM);
+       validate_resources(&info->bridge->dev, crs_res, IORESOURCE_IO);
 
+       resource_list_for_each_entry_safe(entry, tmp, crs_res) {
+               res = entry->res;
                if (res->flags & IORESOURCE_MEM)
                        root = &iomem_resource;
                else if (res->flags & IORESOURCE_IO)
                        root = &ioport_resource;
                else
-                       continue;
+                       BUG_ON(res);
 
                conflict = insert_resource_conflict(root, res);
-               if (conflict)
+               if (conflict) {
                        dev_info(&info->bridge->dev,
                                 "ignoring host bridge window %pR (conflicts with %s %pR)\n",
                                 res, conflict->name, conflict);
-               else
-                       pci_add_resource_offset(resources, res,
-                                       info->res_offset[i]);
+                       resource_list_destroy_entry(entry);
+               }
        }
-}
 
-static void free_pci_root_info_res(struct pci_root_info *info)
-{
-       kfree(info->res);
-       info->res = NULL;
-       kfree(info->res_offset);
-       info->res_offset = NULL;
-       info->res_num = 0;
+       list_splice_tail(crs_res, resources);
 }
 
-static void __release_pci_root_info(struct pci_root_info *info)
+static void release_pci_root_info(struct pci_host_bridge *bridge)
 {
-       int i;
        struct resource *res;
+       struct resource_entry *entry;
+       struct pci_root_info *info = bridge->release_data;
 
-       for (i = 0; i < info->res_num; i++) {
-               res = &info->res[i];
-
-               if (!res->parent)
-                       continue;
-
-               if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
-                       continue;
-
-               release_resource(res);
+       resource_list_for_each_entry(entry, &bridge->windows) {
+               res = entry->res;
+               if (res->parent &&
+                   (res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
+                       release_resource(res);
        }
 
-       free_pci_root_info_res(info);
-
        teardown_mcfg_map(info);
-
        kfree(info);
 }
 
-static void release_pci_root_info(struct pci_host_bridge *bridge)
-{
-       struct pci_root_info *info = bridge->release_data;
-
-       __release_pci_root_info(info);
-}
-
 static void probe_pci_root_info(struct pci_root_info *info,
                                struct acpi_device *device,
-                               int busnum, int domain)
+                               int busnum, int domain,
+                               struct list_head *list)
 {
-       size_t size;
+       int ret;
+       struct resource_entry *entry;
 
        sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
        info->bridge = device;
-
-       info->res_num = 0;
-       acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
-                               info);
-       if (!info->res_num)
-               return;
-
-       size = sizeof(*info->res) * info->res_num;
-       info->res = kzalloc_node(size, GFP_KERNEL, info->sd.node);
-       if (!info->res) {
-               info->res_num = 0;
-               return;
-       }
-
-       size = sizeof(*info->res_offset) * info->res_num;
-       info->res_num = 0;
-       info->res_offset = kzalloc_node(size, GFP_KERNEL, info->sd.node);
-       if (!info->res_offset) {
-               kfree(info->res);
-               info->res = NULL;
-               return;
-       }
-
-       acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
-                               info);
+       ret = acpi_dev_get_resources(device, list,
+                                    acpi_dev_filter_resource_type_cb,
+                                    (void *)(IORESOURCE_IO | IORESOURCE_MEM));
+       if (ret < 0)
+               dev_warn(&device->dev,
+                        "failed to parse _CRS method, error code %d\n", ret);
+       else if (ret == 0)
+               dev_dbg(&device->dev,
+                       "no IO and memory resources present in _CRS\n");
+       else
+               resource_list_for_each_entry(entry, list)
+                       entry->res->name = info->name;
 }
 
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
@@ -473,6 +355,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
        struct pci_root_info *info;
        int domain = root->segment;
        int busnum = root->secondary.start;
+       struct resource_entry *res_entry;
+       LIST_HEAD(crs_res);
        LIST_HEAD(resources);
        struct pci_bus *bus;
        struct pci_sysdata *sd;
@@ -520,18 +404,22 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                memcpy(bus->sysdata, sd, sizeof(*sd));
                kfree(info);
        } else {
-               probe_pci_root_info(info, device, busnum, domain);
-
                /* insert busn res at first */
                pci_add_resource(&resources,  &root->secondary);
+
                /*
                 * _CRS with no apertures is normal, so only fall back to
                 * defaults or native bridge info if we're ignoring _CRS.
                 */
-               if (pci_use_crs)
-                       add_resources(info, &resources);
-               else {
-                       free_pci_root_info_res(info);
+               probe_pci_root_info(info, device, busnum, domain, &crs_res);
+               if (pci_use_crs) {
+                       add_resources(info, &resources, &crs_res);
+               } else {
+                       resource_list_for_each_entry(res_entry, &crs_res)
+                               dev_printk(KERN_DEBUG, &device->dev,
+                                          "host bridge window %pR (ignored)\n",
+                                          res_entry->res);
+                       resource_list_free(&crs_res);
                        x86_pci_root_bus_resources(busnum, &resources);
                }
 
@@ -546,8 +434,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
                                to_pci_host_bridge(bus->bridge),
                                release_pci_root_info, info);
                } else {
-                       pci_free_resource_list(&resources);
-                       __release_pci_root_info(info);
+                       resource_list_free(&resources);
+                       teardown_mcfg_map(info);
+                       kfree(info);
                }
        }
 
index f3a2cfc..7bcf06a 100644 (file)
@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
 {
        struct pci_root_info *info = x86_find_pci_root_info(bus);
        struct pci_root_res *root_res;
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
        bool found = false;
 
        if (!info)
@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
               bus);
 
        /* already added by acpi ? */
-       list_for_each_entry(window, resources, list)
+       resource_list_for_each_entry(window, resources)
                if (window->res->flags & IORESOURCE_BUS) {
                        found = true;
                        break;
index 2fb3847..3d2612b 100644 (file)
@@ -513,6 +513,31 @@ void __init pcibios_set_cache_line_size(void)
        }
 }
 
+/*
+ * Some device drivers assume dev->irq won't change after calling
+ * pci_disable_device(). So delay releasing of IRQ resource to driver
+ * unbinding time. Otherwise it will break PM subsystem and drivers
+ * like xen-pciback etc.
+ */
+static int pci_irq_notifier(struct notifier_block *nb, unsigned long action,
+                           void *data)
+{
+       struct pci_dev *dev = to_pci_dev(data);
+
+       if (action != BUS_NOTIFY_UNBOUND_DRIVER)
+               return NOTIFY_DONE;
+
+       if (pcibios_disable_irq)
+               pcibios_disable_irq(dev);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block pci_irq_nb = {
+       .notifier_call = pci_irq_notifier,
+       .priority = INT_MIN,
+};
+
 int __init pcibios_init(void)
 {
        if (!raw_pci_ops) {
@@ -525,6 +550,9 @@ int __init pcibios_init(void)
 
        if (pci_bf_sort >= pci_force_bf)
                pci_sort_breadthfirst();
+
+       bus_register_notifier(&pci_bus_type, &pci_irq_nb);
+
        return 0;
 }
 
@@ -683,12 +711,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
        return 0;
 }
 
-void pcibios_disable_device (struct pci_dev *dev)
-{
-       if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
-               pcibios_disable_irq(dev);
-}
-
 int pci_ext_cfg_avail(void)
 {
        if (raw_pci_ext_ops)
index 852aa4c..efb8493 100644 (file)
@@ -234,10 +234,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-       if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
-           dev->irq > 0) {
+       if (dev->irq_managed && dev->irq > 0) {
                mp_unmap_irq(dev->irq);
                dev->irq_managed = 0;
+               dev->irq = 0;
        }
 }
 
index 5dc6ca5..e71b3db 100644 (file)
@@ -1256,22 +1256,9 @@ static int pirq_enable_irq(struct pci_dev *dev)
        return 0;
 }
 
-bool mp_should_keep_irq(struct device *dev)
-{
-       if (dev->power.is_prepared)
-               return true;
-#ifdef CONFIG_PM
-       if (dev->power.runtime_status == RPM_SUSPENDING)
-               return true;
-#endif
-
-       return false;
-}
-
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-       if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
-           dev->irq_managed && dev->irq) {
+       if (io_apic_assign_pci_irqs && dev->irq_managed && dev->irq) {
                mp_unmap_irq(dev->irq);
                dev->irq = 0;
                dev->irq_managed = 0;
index 676e5e0..dd30b7e 100644 (file)
@@ -397,12 +397,12 @@ static acpi_status check_mcfg_resource(struct acpi_resource *res, void *data)
 
        status = acpi_resource_to_address64(res, &address);
        if (ACPI_FAILURE(status) ||
-          (address.address_length <= 0) ||
+          (address.address.address_length <= 0) ||
           (address.resource_type != ACPI_MEMORY_RANGE))
                return AE_OK;
 
-       if ((mcfg_res->start >= address.minimum) &&
-           (mcfg_res->end < (address.minimum + address.address_length))) {
+       if ((mcfg_res->start >= address.address.minimum) &&
+           (mcfg_res->end < (address.address.minimum + address.address.address_length))) {
                mcfg_res->flags = 1;
                return AE_CTRL_TERMINATE;
        }
index 9098d88..d22f4b5 100644 (file)
@@ -298,12 +298,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                        map_irq.entry_nr = nvec;
                } else if (type == PCI_CAP_ID_MSIX) {
                        int pos;
+                       unsigned long flags;
                        u32 table_offset, bir;
 
                        pos = dev->msix_cap;
                        pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
                                              &table_offset);
                        bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+                       flags = pci_resource_flags(dev, bir);
+                       if (!flags || (flags & IORESOURCE_UNSET))
+                               return -EINVAL;
 
                        map_irq.table_base = pci_resource_start(dev, bir);
                        map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
index 5c1f9ac..adca9e2 100644 (file)
@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
        native_set_pte(ptep, pte);
 }
 
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
        struct mmuext_op op;
        op.cmd = cmd;
@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void)
  * Like __va(), but returns address in the kernel mapping (which is
  * all we have until the physical memory mapping has been set up.
  */
-static void *__ka(phys_addr_t paddr)
+static void * __init __ka(phys_addr_t paddr)
 {
 #ifdef CONFIG_X86_64
        return (void *)(paddr + __START_KERNEL_map);
@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr)
 }
 
 /* Convert a machine address to physical address */
-static unsigned long m2p(phys_addr_t maddr)
+static unsigned long __init m2p(phys_addr_t maddr)
 {
        phys_addr_t paddr;
 
@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr)
 }
 
 /* Convert a machine address to kernel virtual */
-static void *m2v(phys_addr_t maddr)
+static void * __init m2v(phys_addr_t maddr)
 {
        return __ka(m2p(maddr));
 }
 
 /* Set the page permissions on an identity-mapped pages */
-static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
+static void __init set_page_prot_flags(void *addr, pgprot_t prot,
+                                      unsigned long flags)
 {
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);
@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
                BUG();
 }
-static void set_page_prot(void *addr, pgprot_t prot)
+static void __init set_page_prot(void *addr, pgprot_t prot)
 {
        return set_page_prot_flags(addr, prot, UVMF_NONE);
 }
@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;
 
-#ifdef CONFIG_X86_32
                        if (pfn > max_pfn_mapped)
                                max_pfn_mapped = pfn;
-#endif
 
                        if (!pte_none(pte_page[pteidx]))
                                continue;
@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void)
 }
 
 #ifdef CONFIG_X86_64
-static void convert_pfn_mfn(void *v)
+static void __init convert_pfn_mfn(void *v)
 {
        pte_t *pte = v;
        int i;
index 70fb507..f18fd1d 100644 (file)
@@ -84,8 +84,6 @@
 
 #define PMDS_PER_MID_PAGE      (P2M_MID_PER_PAGE / PTRS_PER_PTE)
 
-static void __init m2p_override_init(void);
-
 unsigned long *xen_p2m_addr __read_mostly;
 EXPORT_SYMBOL_GPL(xen_p2m_addr);
 unsigned long xen_p2m_size __read_mostly;
@@ -402,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void)
        xen_p2m_size = xen_max_p2m_pfn;
 
        xen_inv_extra_mem();
-
-       m2p_override_init();
 }
 
 unsigned long get_phys_to_machine(unsigned long pfn)
@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        return true;
 }
 
-#define M2P_OVERRIDE_HASH_SHIFT        10
-#define M2P_OVERRIDE_HASH      (1 << M2P_OVERRIDE_HASH_SHIFT)
-
-static struct list_head *m2p_overrides;
-static DEFINE_SPINLOCK(m2p_override_lock);
-
-static void __init m2p_override_init(void)
-{
-       unsigned i;
-
-       m2p_overrides = alloc_bootmem_align(
-                               sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
-                               sizeof(unsigned long));
-
-       for (i = 0; i < M2P_OVERRIDE_HASH; i++)
-               INIT_LIST_HEAD(&m2p_overrides[i]);
-}
-
-static unsigned long mfn_hash(unsigned long mfn)
-{
-       return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
-}
-
-/* Add an MFN override for a particular page */
-static int m2p_add_override(unsigned long mfn, struct page *page,
-                           struct gnttab_map_grant_ref *kmap_op)
-{
-       unsigned long flags;
-       unsigned long pfn;
-       unsigned long uninitialized_var(address);
-       unsigned level;
-       pte_t *ptep = NULL;
-
-       pfn = page_to_pfn(page);
-       if (!PageHighMem(page)) {
-               address = (unsigned long)__va(pfn << PAGE_SHIFT);
-               ptep = lookup_address(address, &level);
-               if (WARN(ptep == NULL || level != PG_LEVEL_4K,
-                        "m2p_add_override: pfn %lx not mapped", pfn))
-                       return -EINVAL;
-       }
-
-       if (kmap_op != NULL) {
-               if (!PageHighMem(page)) {
-                       struct multicall_space mcs =
-                               xen_mc_entry(sizeof(*kmap_op));
-
-                       MULTI_grant_table_op(mcs.mc,
-                                       GNTTABOP_map_grant_ref, kmap_op, 1);
-
-                       xen_mc_issue(PARAVIRT_LAZY_MMU);
-               }
-       }
-       spin_lock_irqsave(&m2p_override_lock, flags);
-       list_add(&page->lru,  &m2p_overrides[mfn_hash(mfn)]);
-       spin_unlock_irqrestore(&m2p_override_lock, flags);
-
-       /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
-        * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
-        * pfn so that the following mfn_to_pfn(mfn) calls will return the
-        * pfn from the m2p_override (the backend pfn) instead.
-        * We need to do this because the pages shared by the frontend
-        * (xen-blkfront) can be already locked (lock_page, called by
-        * do_read_cache_page); when the userspace backend tries to use them
-        * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
-        * do_blockdev_direct_IO is going to try to lock the same pages
-        * again resulting in a deadlock.
-        * As a side effect get_user_pages_fast might not be safe on the
-        * frontend pages while they are being shared with the backend,
-        * because mfn_to_pfn (that ends up being called by GUPF) will
-        * return the backend pfn rather than the frontend pfn. */
-       pfn = mfn_to_pfn_no_overrides(mfn);
-       if (__pfn_to_mfn(pfn) == mfn)
-               set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
-
-       return 0;
-}
-
 int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
 {
        int i, ret = 0;
-       bool lazy = false;
        pte_t *pte;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;
 
-       if (kmap_ops &&
-           !in_interrupt() &&
-           paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-               arch_enter_lazy_mmu_mode();
-               lazy = true;
+       if (kmap_ops) {
+               ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
+                                               kmap_ops, count);
+               if (ret)
+                       goto out;
        }
 
        for (i = 0; i < count; i++) {
@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                }
                pfn = page_to_pfn(pages[i]);
 
-               WARN_ON(PagePrivate(pages[i]));
-               SetPagePrivate(pages[i]);
-               set_page_private(pages[i], mfn);
-               pages[i]->index = pfn_to_mfn(pfn);
+               WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
 
                if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
                        ret = -ENOMEM;
                        goto out;
                }
-
-               if (kmap_ops) {
-                       ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
-                       if (ret)
-                               goto out;
-               }
        }
 
 out:
-       if (lazy)
-               arch_leave_lazy_mmu_mode();
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
-static struct page *m2p_find_override(unsigned long mfn)
-{
-       unsigned long flags;
-       struct list_head *bucket;
-       struct page *p, *ret;
-
-       if (unlikely(!m2p_overrides))
-               return NULL;
-
-       ret = NULL;
-       bucket = &m2p_overrides[mfn_hash(mfn)];
-
-       spin_lock_irqsave(&m2p_override_lock, flags);
-
-       list_for_each_entry(p, bucket, lru) {
-               if (page_private(p) == mfn) {
-                       ret = p;
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&m2p_override_lock, flags);
-
-       return ret;
-}
-
-static int m2p_remove_override(struct page *page,
-                              struct gnttab_map_grant_ref *kmap_op,
-                              unsigned long mfn)
-{
-       unsigned long flags;
-       unsigned long pfn;
-       unsigned long uninitialized_var(address);
-       unsigned level;
-       pte_t *ptep = NULL;
-
-       pfn = page_to_pfn(page);
-
-       if (!PageHighMem(page)) {
-               address = (unsigned long)__va(pfn << PAGE_SHIFT);
-               ptep = lookup_address(address, &level);
-
-               if (WARN(ptep == NULL || level != PG_LEVEL_4K,
-                        "m2p_remove_override: pfn %lx not mapped", pfn))
-                       return -EINVAL;
-       }
-
-       spin_lock_irqsave(&m2p_override_lock, flags);
-       list_del(&page->lru);
-       spin_unlock_irqrestore(&m2p_override_lock, flags);
-
-       if (kmap_op != NULL) {
-               if (!PageHighMem(page)) {
-                       struct multicall_space mcs;
-                       struct gnttab_unmap_and_replace *unmap_op;
-                       struct page *scratch_page = get_balloon_scratch_page();
-                       unsigned long scratch_page_address = (unsigned long)
-                               __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
-
-                       /*
-                        * It might be that we queued all the m2p grant table
-                        * hypercalls in a multicall, then m2p_remove_override
-                        * get called before the multicall has actually been
-                        * issued. In this case handle is going to -1 because
-                        * it hasn't been modified yet.
-                        */
-                       if (kmap_op->handle == -1)
-                               xen_mc_flush();
-                       /*
-                        * Now if kmap_op->handle is negative it means that the
-                        * hypercall actually returned an error.
-                        */
-                       if (kmap_op->handle == GNTST_general_error) {
-                               pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
-                                       pfn, mfn);
-                               put_balloon_scratch_page();
-                               return -1;
-                       }
-
-                       xen_mc_batch();
-
-                       mcs = __xen_mc_entry(
-                               sizeof(struct gnttab_unmap_and_replace));
-                       unmap_op = mcs.args;
-                       unmap_op->host_addr = kmap_op->host_addr;
-                       unmap_op->new_addr = scratch_page_address;
-                       unmap_op->handle = kmap_op->handle;
-
-                       MULTI_grant_table_op(mcs.mc,
-                               GNTTABOP_unmap_and_replace, unmap_op, 1);
-
-                       mcs = __xen_mc_entry(0);
-                       MULTI_update_va_mapping(mcs.mc, scratch_page_address,
-                                       pfn_pte(page_to_pfn(scratch_page),
-                                       PAGE_KERNEL_RO), 0);
-
-                       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-                       kmap_op->host_addr = 0;
-                       put_balloon_scratch_page();
-               }
-       }
-
-       /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
-        * somewhere in this domain, even before being added to the
-        * m2p_override (see comment above in m2p_add_override).
-        * If there are no other entries in the m2p_override corresponding
-        * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
-        * the original pfn (the one shared by the frontend): the backend
-        * cannot do any IO on this page anymore because it has been
-        * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
-        * the original pfn causes mfn_to_pfn(mfn) to return the frontend
-        * pfn again. */
-       mfn &= ~FOREIGN_FRAME_BIT;
-       pfn = mfn_to_pfn_no_overrides(mfn);
-       if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
-                       m2p_find_override(mfn) == NULL)
-               set_phys_to_machine(pfn, mfn);
-
-       return 0;
-}
-
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-                             struct gnttab_map_grant_ref *kmap_ops,
+                             struct gnttab_unmap_grant_ref *kunmap_ops,
                              struct page **pages, unsigned int count)
 {
        int i, ret = 0;
-       bool lazy = false;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;
 
-       if (kmap_ops &&
-           !in_interrupt() &&
-           paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-               arch_enter_lazy_mmu_mode();
-               lazy = true;
-       }
-
        for (i = 0; i < count; i++) {
                unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
                unsigned long pfn = page_to_pfn(pages[i]);
@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                        goto out;
                }
 
-               set_page_private(pages[i], INVALID_P2M_ENTRY);
-               WARN_ON(!PagePrivate(pages[i]));
-               ClearPagePrivate(pages[i]);
-               set_phys_to_machine(pfn, pages[i]->index);
-
-               if (kmap_ops)
-                       ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
-               if (ret)
-                       goto out;
+               set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
-
+       if (kunmap_ops)
+               ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
+                                               kunmap_ops, count);
 out:
-       if (lazy)
-               arch_leave_lazy_mmu_mode();
        return ret;
 }
 EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 
-unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
-{
-       struct page *p = m2p_find_override(mfn);
-       unsigned long ret = pfn;
-
-       if (p)
-               ret = page_to_pfn(p);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
-
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
 #include "debugfs.h"
index 865e56c..55f388e 100644 (file)
 #include "p2m.h"
 #include "mmu.h"
 
-/* These are code, but not functions.  Defined in entry.S */
-extern const char xen_hypervisor_callback[];
-extern const char xen_failsafe_callback[];
-#ifdef CONFIG_X86_64
-extern asmlinkage void nmi(void);
-#endif
-extern void xen_sysenter_target(void);
-extern void xen_syscall_target(void);
-extern void xen_syscall32_target(void);
-
 /* Amount of extra memory space we add to the e820 ranges */
 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
  */
 #define EXTRA_MEM_RATIO                (10)
 
-static void __init xen_add_extra_mem(u64 start, u64 size)
+static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
 {
        int i;
 
@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
        memblock_reserve(start, size);
 }
 
-static void __init xen_del_extra_mem(u64 start, u64 size)
+static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
 {
        int i;
-       u64 start_r, size_r;
+       phys_addr_t start_r, size_r;
 
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                start_r = xen_extra_mem[i].start;
@@ -267,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
 {
        struct mmu_update update = {
-               .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+               .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
                .val = pfn
        };
 
@@ -545,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void)
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
-static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
+static void __init xen_align_and_add_e820_region(phys_addr_t start,
+                                                phys_addr_t size, int type)
 {
-       u64 end = start + size;
+       phys_addr_t end = start + size;
 
        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
-               end &= ~((u64)PAGE_SIZE - 1);
+               end &= ~((phys_addr_t)PAGE_SIZE - 1);
        }
 
        e820_add_region(start, end - start, type);
 }
 
-void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
 {
        struct e820entry *entry;
        unsigned int i;
@@ -577,7 +568,7 @@ char * __init xen_memory_setup(void)
        static struct e820entry map[E820MAX] __initdata;
 
        unsigned long max_pfn = xen_start_info->nr_pages;
-       unsigned long long mem_end;
+       phys_addr_t mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
@@ -652,16 +643,16 @@ char * __init xen_memory_setup(void)
                          extra_pages);
        i = 0;
        while (i < memmap.nr_entries) {
-               u64 addr = map[i].addr;
-               u64 size = map[i].size;
+               phys_addr_t addr = map[i].addr;
+               phys_addr_t size = map[i].size;
                u32 type = map[i].type;
 
                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
-                               size = min(size, (u64)extra_pages * PAGE_SIZE);
-                               extra_pages -= size / PAGE_SIZE;
+                               size = min(size, PFN_PHYS(extra_pages));
+                               extra_pages -= PFN_DOWN(size);
                                xen_add_extra_mem(addr, size);
                                xen_max_p2m_pfn = PFN_DOWN(addr + size);
                        } else
index 4c071ae..08e8489 100644 (file)
@@ -507,7 +507,7 @@ static int xen_cpu_disable(void)
 static void xen_cpu_die(unsigned int cpu)
 {
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
-               current->state = TASK_UNINTERRUPTIBLE;
+               __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }
 
index 6908734..55da33b 100644 (file)
@@ -479,6 +479,10 @@ static void __init xen_time_init(void)
        int cpu = smp_processor_id();
        struct timespec tp;
 
+       /* As Dom0 is never moved, no penalty on using TSC there */
+       if (xen_initial_domain())
+               xen_clocksource.rating = 275;
+
        clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
 
        if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
index 5686bd9..9e195c6 100644 (file)
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
+void xen_sysenter_target(void);
+#ifdef CONFIG_X86_64
+void xen_syscall_target(void);
+void xen_syscall32_target(void);
+#endif
+
 extern void *xen_initial_gdt;
 
 struct trap_info;
index 8951cef..e6c3ddd 100644 (file)
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY
          To compile this driver as a module, choose M here:
          the module will be called acpi_memhotplug.
 
+config ACPI_HOTPLUG_IOAPIC
+       bool
+       depends on PCI
+       depends on X86_IO_APIC
+       default y
+
 config ACPI_SBS
        tristate "Smart Battery System"
        depends on X86
index f74317c..b18cd21 100644 (file)
@@ -40,7 +40,7 @@ acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
 acpi-y                         += ec.o
 acpi-$(CONFIG_ACPI_DOCK)       += dock.o
 acpi-y                         += pci_root.o pci_link.o pci_irq.o
-acpi-y                         += acpi_lpss.o
+acpi-y                         += acpi_lpss.o acpi_apd.o
 acpi-y                         += acpi_platform.o
 acpi-y                         += acpi_pnp.o
 acpi-y                         += int340x_thermal.o
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR)  += processor.o
 obj-y                          += container.o
 obj-$(CONFIG_ACPI_THERMAL)     += thermal.o
 obj-y                          += acpi_memhotplug.o
+obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
 obj-$(CONFIG_ACPI_BATTERY)     += battery.o
 obj-$(CONFIG_ACPI_SBS)         += sbshc.o
 obj-$(CONFIG_ACPI_SBS)         += sbs.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
new file mode 100644 (file)
index 0000000..3984ea9
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * AMD ACPI support for ACPI2platform device.
+ *
+ * Copyright (c) 2014,2015 AMD Corporation.
+ * Authors: Ken Xue <Ken.Xue@amd.com>
+ *     Wu, Jeff <Jeff.Wu@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/clkdev.h>
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/pm.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("acpi_apd");
+struct apd_private_data;
+
+/**
+ * ACPI_APD_SYSFS : add device attributes in sysfs
+ * ACPI_APD_PM : attach power domain to device
+ */
+#define ACPI_APD_SYSFS BIT(0)
+#define ACPI_APD_PM    BIT(1)
+
+/**
+ * struct apd_device_desc - a descriptor for apd device
+ * @flags: device flags like %ACPI_APD_SYSFS, %ACPI_APD_PM
+ * @fixed_clk_rate: fixed rate input clock source for acpi device;
+ *                     0 means no fixed rate input clock source
+ * @setup: a hook routine to set device resource during create platform device
+ *
+ * Device description defined as acpi_device_id.driver_data
+ */
+struct apd_device_desc {
+       unsigned int flags;
+       unsigned int fixed_clk_rate;
+       int (*setup)(struct apd_private_data *pdata);
+};
+
+struct apd_private_data {
+       struct clk *clk;
+       struct acpi_device *adev;
+       const struct apd_device_desc *dev_desc;
+};
+
+#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
+#define APD_ADDR(desc) ((unsigned long)&desc)
+
+static int acpi_apd_setup(struct apd_private_data *pdata)
+{
+       const struct apd_device_desc *dev_desc = pdata->dev_desc;
+       struct clk *clk = ERR_PTR(-ENODEV);
+
+       if (dev_desc->fixed_clk_rate) {
+               clk = clk_register_fixed_rate(&pdata->adev->dev,
+                                       dev_name(&pdata->adev->dev),
+                                       NULL, CLK_IS_ROOT,
+                                       dev_desc->fixed_clk_rate);
+               clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
+               pdata->clk = clk;
+       }
+
+       return 0;
+}
+
+static struct apd_device_desc cz_i2c_desc = {
+       .setup = acpi_apd_setup,
+       .fixed_clk_rate = 133000000,
+};
+
+static struct apd_device_desc cz_uart_desc = {
+       .setup = acpi_apd_setup,
+       .fixed_clk_rate = 48000000,
+};
+
+#else
+
+#define APD_ADDR(desc) (0UL)
+
+#endif /* CONFIG_X86_AMD_PLATFORM_DEVICE */
+
+/**
+* Create platform device during acpi scan attach handle.
+* Return value > 0 on success of creating device.
+*/
+static int acpi_apd_create_device(struct acpi_device *adev,
+                                  const struct acpi_device_id *id)
+{
+       const struct apd_device_desc *dev_desc = (void *)id->driver_data;
+       struct apd_private_data *pdata;
+       struct platform_device *pdev;
+       int ret;
+
+       if (!dev_desc) {
+               pdev = acpi_create_platform_device(adev);
+               return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
+       }
+
+       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
+
+       pdata->adev = adev;
+       pdata->dev_desc = dev_desc;
+
+       if (dev_desc->setup) {
+               ret = dev_desc->setup(pdata);
+               if (ret)
+                       goto err_out;
+       }
+
+       adev->driver_data = pdata;
+       pdev = acpi_create_platform_device(adev);
+       if (!IS_ERR_OR_NULL(pdev))
+               return 1;
+
+       ret = PTR_ERR(pdev);
+       adev->driver_data = NULL;
+
+ err_out:
+       kfree(pdata);
+       return ret;
+}
+
+static const struct acpi_device_id acpi_apd_device_ids[] = {
+       /* Generic apd devices */
+       { "AMD0010", APD_ADDR(cz_i2c_desc) },
+       { "AMD0020", APD_ADDR(cz_uart_desc) },
+       { "AMD0030", },
+       { }
+};
+
+static struct acpi_scan_handler apd_handler = {
+       .ids = acpi_apd_device_ids,
+       .attach = acpi_apd_create_device,
+};
+
+void __init acpi_apd_init(void)
+{
+       acpi_scan_add_handler(&apd_handler);
+}
index e75737f..02e835f 100644 (file)
@@ -125,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = {
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
+       .flags = LPSS_CLK | LPSS_LTR,
        .prv_offset = 0x800,
 };
 
@@ -307,7 +307,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 {
        struct lpss_device_desc *dev_desc;
        struct lpss_private_data *pdata;
-       struct resource_list_entry *rentry;
+       struct resource_entry *rentry;
        struct list_head resource_list;
        struct platform_device *pdev;
        int ret;
@@ -327,13 +327,15 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
                goto err_out;
 
        list_for_each_entry(rentry, &resource_list, node)
-               if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+               if (resource_type(rentry->res) == IORESOURCE_MEM) {
                        if (dev_desc->prv_size_override)
                                pdata->mmio_size = dev_desc->prv_size_override;
                        else
-                               pdata->mmio_size = resource_size(&rentry->res);
-                       pdata->mmio_base = ioremap(rentry->res.start,
+                               pdata->mmio_size = resource_size(rentry->res);
+                       pdata->mmio_base = ioremap(rentry->res->start,
                                                   pdata->mmio_size);
+                       if (!pdata->mmio_base)
+                               goto err_out;
                        break;
                }
 
index 23e2319..ee28f4d 100644 (file)
@@ -101,8 +101,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
                /* Can we combine the resource range information? */
                if ((info->caching == address64.info.mem.caching) &&
                    (info->write_protect == address64.info.mem.write_protect) &&
-                   (info->start_addr + info->length == address64.minimum)) {
-                       info->length += address64.address_length;
+                   (info->start_addr + info->length == address64.address.minimum)) {
+                       info->length += address64.address.address_length;
                        return AE_OK;
                }
        }
@@ -114,8 +114,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
        INIT_LIST_HEAD(&new->list);
        new->caching = address64.info.mem.caching;
        new->write_protect = address64.info.mem.write_protect;
-       new->start_addr = address64.minimum;
-       new->length = address64.address_length;
+       new->start_addr = address64.address.minimum;
+       new->length = address64.address.address_length;
        list_add_tail(&new->list, &mem_device->res_list);
 
        return AE_OK;
index 6ba8beb..1284138 100644 (file)
@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
        struct platform_device *pdev = NULL;
        struct acpi_device *acpi_parent;
        struct platform_device_info pdevinfo;
-       struct resource_list_entry *rentry;
+       struct resource_entry *rentry;
        struct list_head resource_list;
        struct resource *resources = NULL;
        int count;
@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
                }
                count = 0;
                list_for_each_entry(rentry, &resource_list, node)
-                       resources[count++] = rentry->res;
+                       resources[count++] = *rentry->res;
 
                acpi_dev_free_resource_list(&resource_list);
        }
index 3d2c882..d863016 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
 /* Common info for tool signons */
 
 #define ACPICA_NAME                 "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT            "Copyright (c) 2000 - 2014 Intel Corporation"
+#define ACPICA_COPYRIGHT            "Copyright (c) 2000 - 2015 Intel Corporation"
 
 #if ACPI_MACHINE_WIDTH == 64
 #define ACPI_WIDTH          "-64"
index 6f1c616..853aa2d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1d026ff..4169bb8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d3e2cc3..408f04b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7a7811a..228704b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -143,8 +143,6 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
 acpi_status
 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
 
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
-
 acpi_status
 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                       struct acpi_gpe_block_info *gpe_block, void *context);
index 7f60582..a165d25 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c318d3e..196a552 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b01f71c..1886bde 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 680d23b..7add32e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4bceb11..cf607fe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ee1c040..952fbe0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8abb393..3e9720e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dda0e6a..a5f17de 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6168b85..74a390c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bd3908d..a972d11 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4b008e8..efc4c71 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cf73461..d14b547 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1afe46e..1c127a4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 486d342..c2f03e8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5908cce..3a95068 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3a0beeb..ee0cdd6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 720b1cd..3e69897 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8daf9de..39da9da 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c576661..43b40de 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aee5e45..bbe74bc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3c7f737..d72565a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b67522d..2e4c42b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a1e7e6b..8a7b07b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6c0759c..7724418 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9f74795..e5ff89b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f7f5107..df54d46 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 15623da..843942f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2ac28d2..fcaa30c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9d6e2c1..43b3ea4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 24f7d5e..89ac202 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c7bffff..bf6873f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3393a73..b78dc7c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aa70154..5ed064e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -114,17 +114,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-       /*
-        * We will only allow a GPE to be enabled if it has either an associated
-        * method (_Lxx/_Exx) or a handler, or is using the implicit notify
-        * feature. Otherwise, the GPE will be immediately disabled by
-        * acpi_ev_gpe_dispatch the first time it fires.
-        */
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-           ACPI_GPE_DISPATCH_NONE) {
-               return_ACPI_STATUS(AE_NO_HANDLER);
-       }
-
        /* Clear the GPE (of stale events) */
 
        status = acpi_hw_clear_gpe(gpe_event_info);
@@ -339,7 +328,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 {
        acpi_status status;
        struct acpi_gpe_block_info *gpe_block;
+       struct acpi_namespace_node *gpe_device;
        struct acpi_gpe_register_info *gpe_register_info;
+       struct acpi_gpe_event_info *gpe_event_info;
+       u32 gpe_number;
+       struct acpi_gpe_handler_info *gpe_handler_info;
        u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
        u8 enabled_status_byte;
        u32 status_reg;
@@ -367,6 +360,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 
        gpe_block = gpe_xrupt_list->gpe_block_list_head;
        while (gpe_block) {
+               gpe_device = gpe_block->node;
+
                /*
                 * Read all of the 8-bit GPE status and enable registers in this GPE
                 * block, saving all of them. Find all currently active GP events.
@@ -442,16 +437,68 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
 
                                /* Examine one GPE bit */
 
+                               gpe_event_info =
+                                   &gpe_block->
+                                   event_info[((acpi_size) i *
+                                               ACPI_GPE_REGISTER_WIDTH) + j];
+                               gpe_number =
+                                   j + gpe_register_info->base_gpe_number;
+
                                if (enabled_status_byte & (1 << j)) {
-                                       /*
-                                        * Found an active GPE. Dispatch the event to a handler
-                                        * or method.
-                                        */
-                                       int_status |=
-                                           acpi_ev_gpe_dispatch(gpe_block->
-                                                                node,
-                                                                &gpe_block->
-                                                                event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
+
+                                       /* Invoke global event handler if present */
+
+                                       acpi_gpe_count++;
+                                       if (acpi_gbl_global_event_handler) {
+                                               acpi_gbl_global_event_handler
+                                                   (ACPI_EVENT_TYPE_GPE,
+                                                    gpe_device, gpe_number,
+                                                    acpi_gbl_global_event_handler_context);
+                                       }
+
+                                       /* Found an active GPE */
+
+                                       if (ACPI_GPE_DISPATCH_TYPE
+                                           (gpe_event_info->flags) ==
+                                           ACPI_GPE_DISPATCH_RAW_HANDLER) {
+
+                                               /* Dispatch the event to a raw handler */
+
+                                               gpe_handler_info =
+                                                   gpe_event_info->dispatch.
+                                                   handler;
+
+                                               /*
+                                                * There is no protection around the namespace node
+                                                * and the GPE handler to ensure a safe destruction
+                                                * because:
+                                                * 1. The namespace node is expected to always
+                                                *    exist after loading a table.
+                                                * 2. The GPE handler is expected to be flushed by
+                                                *    acpi_os_wait_events_complete() before the
+                                                *    destruction.
+                                                */
+                                               acpi_os_release_lock
+                                                   (acpi_gbl_gpe_lock, flags);
+                                               int_status |=
+                                                   gpe_handler_info->
+                                                   address(gpe_device,
+                                                           gpe_number,
+                                                           gpe_handler_info->
+                                                           context);
+                                               flags =
+                                                   acpi_os_acquire_lock
+                                                   (acpi_gbl_gpe_lock);
+                                       } else {
+                                               /*
+                                                * Dispatch the event to a standard handler or
+                                                * method.
+                                                */
+                                               int_status |=
+                                                   acpi_ev_gpe_dispatch
+                                                   (gpe_device, gpe_event_info,
+                                                    gpe_number);
+                                       }
                                }
                        }
                }
@@ -484,52 +531,15 @@ unlock_and_exit:
 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 {
        struct acpi_gpe_event_info *gpe_event_info = context;
-       acpi_status status;
-       struct acpi_gpe_event_info *local_gpe_event_info;
+       acpi_status status = AE_OK;
        struct acpi_evaluate_info *info;
        struct acpi_gpe_notify_info *notify;
 
        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 
-       /* Allocate a local GPE block */
-
-       local_gpe_event_info =
-           ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
-       if (!local_gpe_event_info) {
-               ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
-               return_VOID;
-       }
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
-       if (ACPI_FAILURE(status)) {
-               ACPI_FREE(local_gpe_event_info);
-               return_VOID;
-       }
-
-       /* Must revalidate the gpe_number/gpe_block */
-
-       if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
-               status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-               ACPI_FREE(local_gpe_event_info);
-               return_VOID;
-       }
-
-       /*
-        * Take a snapshot of the GPE info for this level - we copy the info to
-        * prevent a race condition with remove_handler/remove_block.
-        */
-       ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
-                   sizeof(struct acpi_gpe_event_info));
-
-       status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
-       if (ACPI_FAILURE(status)) {
-               ACPI_FREE(local_gpe_event_info);
-               return_VOID;
-       }
-
        /* Do the correct dispatch - normal method or implicit notify */
 
-       switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+       switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
        case ACPI_GPE_DISPATCH_NOTIFY:
                /*
                 * Implicit notify.
@@ -542,7 +552,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                 * June 2012: Expand implicit notify mechanism to support
                 * notifies on multiple device objects.
                 */
-               notify = local_gpe_event_info->dispatch.notify_list;
+               notify = gpe_event_info->dispatch.notify_list;
                while (ACPI_SUCCESS(status) && notify) {
                        status =
                            acpi_ev_queue_notify_request(notify->device_node,
@@ -566,7 +576,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                         * _Lxx/_Exx control method that corresponds to this GPE
                         */
                        info->prefix_node =
-                           local_gpe_event_info->dispatch.method_node;
+                           gpe_event_info->dispatch.method_node;
                        info->flags = ACPI_IGNORE_RETURN_VALUE;
 
                        status = acpi_ns_evaluate(info);
@@ -576,25 +586,27 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "while evaluating GPE method [%4.4s]",
-                                       acpi_ut_get_node_name
-                                       (local_gpe_event_info->dispatch.
-                                        method_node)));
+                                       acpi_ut_get_node_name(gpe_event_info->
+                                                             dispatch.
+                                                             method_node)));
                }
                break;
 
        default:
 
-               return_VOID;    /* Should never happen */
+               goto error_exit;        /* Should never happen */
        }
 
        /* Defer enabling of GPE until all notify handlers are done */
 
        status = acpi_os_execute(OSL_NOTIFY_HANDLER,
-                                acpi_ev_asynch_enable_gpe,
-                                local_gpe_event_info);
-       if (ACPI_FAILURE(status)) {
-               ACPI_FREE(local_gpe_event_info);
+                                acpi_ev_asynch_enable_gpe, gpe_event_info);
+       if (ACPI_SUCCESS(status)) {
+               return_VOID;
        }
+
+error_exit:
+       acpi_ev_asynch_enable_gpe(gpe_event_info);
        return_VOID;
 }
 
@@ -622,7 +634,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
        (void)acpi_ev_finish_gpe(gpe_event_info);
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 
-       ACPI_FREE(gpe_event_info);
        return;
 }
 
@@ -692,15 +703,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
 
        ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
 
-       /* Invoke global event handler if present */
-
-       acpi_gpe_count++;
-       if (acpi_gbl_global_event_handler) {
-               acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
-                                             gpe_number,
-                                             acpi_gbl_global_event_handler_context);
-       }
-
        /*
         * Always disable the GPE so that it does not keep firing before
         * any asynchronous activity completes (either from the execution
@@ -741,7 +743,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
         * If there is neither a handler nor a method, leave the GPE
         * disabled.
         */
-       switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
+       switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
        case ACPI_GPE_DISPATCH_HANDLER:
 
                /* Invoke the installed handler (at interrupt level) */
index d86699e..e0f24c5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -474,10 +474,12 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                         * Ignore GPEs that have no corresponding _Lxx/_Exx method
                         * and GPEs that are used to wake the system
                         */
-                       if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+                       if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
                             ACPI_GPE_DISPATCH_NONE)
-                           || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
-                               == ACPI_GPE_DISPATCH_HANDLER)
+                           || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+                               ACPI_GPE_DISPATCH_HANDLER)
+                           || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+                               ACPI_GPE_DISPATCH_RAW_HANDLER)
                            || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
                                continue;
                        }
index 7be9283..8840296 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -401,15 +401,17 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
                return_ACPI_STATUS(AE_OK);
        }
 
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-           ACPI_GPE_DISPATCH_HANDLER) {
+       if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+            ACPI_GPE_DISPATCH_HANDLER) ||
+           (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+            ACPI_GPE_DISPATCH_RAW_HANDLER)) {
 
                /* If there is already a handler, ignore this GPE method */
 
                return_ACPI_STATUS(AE_OK);
        }
 
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+       if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
            ACPI_GPE_DISPATCH_METHOD) {
                /*
                 * If there is already a method, ignore this method. But check
index 17e4bbf..3a958f3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -106,53 +106,6 @@ unlock_and_exit:
        return_ACPI_STATUS(status);
 }
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ev_valid_gpe_event
- *
- * PARAMETERS:  gpe_event_info              - Info for this GPE
- *
- * RETURN:      TRUE if the gpe_event is valid
- *
- * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
- *              Should be called only when the GPE lists are semaphore locked
- *              and not subject to change.
- *
- ******************************************************************************/
-
-u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
-{
-       struct acpi_gpe_xrupt_info *gpe_xrupt_block;
-       struct acpi_gpe_block_info *gpe_block;
-
-       ACPI_FUNCTION_ENTRY();
-
-       /* No need for spin lock since we are not changing any list elements */
-
-       /* Walk the GPE interrupt levels */
-
-       gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
-       while (gpe_xrupt_block) {
-               gpe_block = gpe_xrupt_block->gpe_block_list_head;
-
-               /* Walk the GPE blocks on this interrupt level */
-
-               while (gpe_block) {
-                       if ((&gpe_block->event_info[0] <= gpe_event_info) &&
-                           (&gpe_block->event_info[gpe_block->gpe_count] >
-                            gpe_event_info)) {
-                               return (TRUE);
-                       }
-
-                       gpe_block = gpe_block->next;
-               }
-
-               gpe_xrupt_block = gpe_xrupt_block->next;
-       }
-
-       return (FALSE);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_get_gpe_device
@@ -371,8 +324,10 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                                                 ACPI_GPE_REGISTER_WIDTH)
                                                                + j];
 
-                       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-                           ACPI_GPE_DISPATCH_HANDLER) {
+                       if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+                            ACPI_GPE_DISPATCH_HANDLER) ||
+                           (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+                            ACPI_GPE_DISPATCH_RAW_HANDLER)) {
 
                                /* Delete an installed handler block */
 
@@ -380,10 +335,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                gpe_event_info->dispatch.handler = NULL;
                                gpe_event_info->flags &=
                                    ~ACPI_GPE_DISPATCH_MASK;
-                       } else
-                           if ((gpe_event_info->
-                                flags & ACPI_GPE_DISPATCH_MASK) ==
-                               ACPI_GPE_DISPATCH_NOTIFY) {
+                       } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
+                                  == ACPI_GPE_DISPATCH_NOTIFY) {
 
                                /* Delete the implicit notification device list */
 
index 78ac293..74e8595 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 24ea342..f7c9dfe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8eb8575..9abace3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b148a4..da32339 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 29630e3..0366703 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 55a58f3..81f2d9e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxface")
+#if (!ACPI_REDUCED_HARDWARE)
+/* Local prototypes */
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
+                           u32 gpe_number,
+                           u32 type,
+                           u8 is_raw_handler,
+                           acpi_gpe_handler address, void *context);
+
+#endif
 
 
 /*******************************************************************************
@@ -76,6 +86,7 @@ ACPI_MODULE_NAME("evxface")
  *              handlers.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_install_notify_handler(acpi_handle device,
                            u32 handler_type,
@@ -717,32 +728,37 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_install_gpe_handler
+ * FUNCTION:    acpi_ev_install_gpe_handler
  *
  * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
  *                                defined GPEs)
  *              gpe_number      - The GPE number within the GPE block
  *              type            - Whether this GPE should be treated as an
  *                                edge- or level-triggered interrupt.
+ *              is_raw_handler  - Whether this GPE should be handled using
+ *                                the special GPE handler mode.
  *              address         - Address of the handler
  *              context         - Value passed to the handler on each GPE
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Install a handler for a General Purpose Event.
+ * DESCRIPTION: Internal function to install a handler for a General Purpose
+ *              Event.
  *
  ******************************************************************************/
-acpi_status
-acpi_install_gpe_handler(acpi_handle gpe_device,
-                        u32 gpe_number,
-                        u32 type, acpi_gpe_handler address, void *context)
+static acpi_status
+acpi_ev_install_gpe_handler(acpi_handle gpe_device,
+                           u32 gpe_number,
+                           u32 type,
+                           u8 is_raw_handler,
+                           acpi_gpe_handler address, void *context)
 {
        struct acpi_gpe_event_info *gpe_event_info;
        struct acpi_gpe_handler_info *handler;
        acpi_status status;
        acpi_cpu_flags flags;
 
-       ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+       ACPI_FUNCTION_TRACE(ev_install_gpe_handler);
 
        /* Parameter validation */
 
@@ -775,8 +791,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
        /* Make sure that there isn't a handler there already */
 
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-           ACPI_GPE_DISPATCH_HANDLER) {
+       if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+            ACPI_GPE_DISPATCH_HANDLER) ||
+           (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
+            ACPI_GPE_DISPATCH_RAW_HANDLER)) {
                status = AE_ALREADY_EXISTS;
                goto free_and_exit;
        }
@@ -793,9 +811,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
         * automatically during initialization, in which case it has to be
         * disabled now to avoid spurious execution of the handler.
         */
-       if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
-            (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
-           gpe_event_info->runtime_count) {
+       if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+             ACPI_GPE_DISPATCH_METHOD) ||
+            (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+             ACPI_GPE_DISPATCH_NOTIFY)) && gpe_event_info->runtime_count) {
                handler->originally_enabled = TRUE;
                (void)acpi_ev_remove_gpe_reference(gpe_event_info);
 
@@ -816,7 +835,10 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
 
        gpe_event_info->flags &=
            ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
-       gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER);
+       gpe_event_info->flags |=
+           (u8)(type |
+                (is_raw_handler ? ACPI_GPE_DISPATCH_RAW_HANDLER :
+                 ACPI_GPE_DISPATCH_HANDLER));
 
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 
@@ -830,8 +852,76 @@ free_and_exit:
        goto unlock_and_exit;
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_handler
+ *
+ * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
+ *                                defined GPEs)
+ *              gpe_number      - The GPE number within the GPE block
+ *              type            - Whether this GPE should be treated as an
+ *                                edge- or level-triggered interrupt.
+ *              address         - Address of the handler
+ *              context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_install_gpe_handler(acpi_handle gpe_device,
+                        u32 gpe_number,
+                        u32 type, acpi_gpe_handler address, void *context)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);
+
+       status =
+           acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, FALSE,
+                                       address, context);
+
+       return_ACPI_STATUS(status);
+}
+
 ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_gpe_raw_handler
+ *
+ * PARAMETERS:  gpe_device      - Namespace node for the GPE (NULL for FADT
+ *                                defined GPEs)
+ *              gpe_number      - The GPE number within the GPE block
+ *              type            - Whether this GPE should be treated as an
+ *                                edge- or level-triggered interrupt.
+ *              address         - Address of the handler
+ *              context         - Value passed to the handler on each GPE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a General Purpose Event.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_gpe_raw_handler(acpi_handle gpe_device,
+                            u32 gpe_number,
+                            u32 type, acpi_gpe_handler address, void *context)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_gpe_raw_handler);
+
+       status = acpi_ev_install_gpe_handler(gpe_device, gpe_number, type, TRUE,
+                                            address, context);
+
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_gpe_raw_handler)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_remove_gpe_handler
@@ -880,8 +970,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
 
        /* Make sure that a handler is indeed installed */
 
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
-           ACPI_GPE_DISPATCH_HANDLER) {
+       if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+            ACPI_GPE_DISPATCH_HANDLER) &&
+           (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+            ACPI_GPE_DISPATCH_RAW_HANDLER)) {
                status = AE_NOT_EXIST;
                goto unlock_and_exit;
        }
@@ -896,6 +988,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
        /* Remove the handler */
 
        handler = gpe_event_info->dispatch.handler;
+       gpe_event_info->dispatch.handler = NULL;
 
        /* Restore Method node (if any), set dispatch flags */
 
@@ -909,9 +1002,10 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
         * enabled, it should be enabled at this point to restore the
         * post-initialization configuration.
         */
-       if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
-            (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
-           handler->originally_enabled) {
+       if (((ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+             ACPI_GPE_DISPATCH_METHOD) ||
+            (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+             ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
                (void)acpi_ev_add_gpe_reference(gpe_event_info);
        }
 
index bb8cbf5..df06a23 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e889a53..70eb47e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,7 +132,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
         */
        gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
        if (gpe_event_info) {
-               if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+               if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
                    ACPI_GPE_DISPATCH_NONE) {
                        status = acpi_ev_add_gpe_reference(gpe_event_info);
                } else {
@@ -183,6 +183,77 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number)
 
 ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_set_gpe
+ *
+ * PARAMETERS:  gpe_device          - Parent GPE Device. NULL for GPE0/GPE1
+ *              gpe_number          - GPE level within the GPE block
+ *              action              - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
+ *              the reference count mechanism used in the acpi_enable_gpe(),
+ *              acpi_disable_gpe() interfaces.
+ *              This API is typically used by the GPE raw handler mode driver
+ *              to switch between the polling mode and the interrupt mode after
+ *              the driver has enabled the GPE.
+ *              The APIs should be invoked in this order:
+ *               acpi_enable_gpe()            <- Ensure the reference count > 0
+ *               acpi_set_gpe(ACPI_GPE_DISABLE) <- Enter polling mode
+ *               acpi_set_gpe(ACPI_GPE_ENABLE) <- Leave polling mode
+ *               acpi_disable_gpe()           <- Decrease the reference count
+ *
+ * Note: If a GPE is shared by 2 silicon components, then both the drivers
+ *       should support GPE polling mode, as disabling the GPE for a long
+ *       period by one driver may break the other. So use it with care since all
+ *       firmware _Lxx/_Exx handlers currently rely on the GPE interrupt mode.
+ *
+ ******************************************************************************/
+acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
+{
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_status status;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_set_gpe);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       /* Perform the action */
+
+       switch (action) {
+       case ACPI_GPE_ENABLE:
+
+               status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
+               break;
+
+       case ACPI_GPE_DISABLE:
+
+               status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
+               break;
+
+       default:
+
+               status = AE_BAD_PARAMETER;
+               break;
+       }
+
+unlock_and_exit:
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_set_gpe)
 
 /*******************************************************************************
  *
@@ -313,7 +384,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
         * known as an "implicit notify". Note: The GPE is assumed to be
         * level-triggered (for windows compatibility).
         */
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+       if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
            ACPI_GPE_DISPATCH_NONE) {
                /*
                 * This is the first device for implicit notify on this GPE.
@@ -327,7 +398,7 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
         * If we already have an implicit notify on this GPE, add
         * this device to the notify list.
         */
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+       if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
            ACPI_GPE_DISPATCH_NOTIFY) {
 
                /* Ensure that the device is not already in the list */
@@ -530,6 +601,49 @@ unlock_and_exit:
 
 ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_finish_gpe
+ *
+ * PARAMETERS:  gpe_device          - Namespace node for the GPE Block
+ *                                    (NULL for FADT defined GPEs)
+ *              gpe_number          - GPE level within the GPE block
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE
+ *              processing. Intended for use by asynchronous host-installed
+ *              GPE handlers. The GPE is only reenabled if the enable_for_run bit
+ *              is set in the GPE info.
+ *
+ ******************************************************************************/
+acpi_status acpi_finish_gpe(acpi_handle gpe_device, u32 gpe_number)
+{
+       struct acpi_gpe_event_info *gpe_event_info;
+       acpi_status status;
+       acpi_cpu_flags flags;
+
+       ACPI_FUNCTION_TRACE(acpi_finish_gpe);
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Ensure that we have a valid GPE number */
+
+       gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+       if (!gpe_event_info) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit;
+       }
+
+       status = acpi_ev_finish_gpe(gpe_event_info);
+
+unlock_and_exit:
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_finish_gpe)
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_disable_all_gpes
@@ -604,7 +718,6 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
  *              all GPE blocks.
  *
  ******************************************************************************/
-
 acpi_status acpi_enable_all_wakeup_gpes(void)
 {
        acpi_status status;
index 2d6f187..f21afba 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7d29494..6e0df2b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c545386..89a976b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 95d23da..aaeea48 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6fbfad4..e67d0ac 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0f23c3f..7c213b6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b994845..c161dd9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1d1b27a..4947992 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2207e62..b56fc9d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b49ea2a..472030f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dbb03b5..453b00c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b8e941..7793068 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2ede656..fcc618a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 363767c..b813fed 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 29e9e99..c930edd 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 118e942..4c2836d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cd5288a..0fe188e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ab06026..c7e3b92 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3cde553..b6b7f3a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3af8de3..d2964af 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index daf49f7..a7eee24 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 04bd16c..3101607 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fd11018..6fa3c8d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f7da641..0545065 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d9d72df..3f4225e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1e66d96..e5c5949 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 858fdd6..e5599f6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 494027f..84bc550 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -54,6 +54,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                struct acpi_gpe_block_info *gpe_block,
                                void *context);
 
+static acpi_status
+acpi_hw_gpe_enable_write(u8 enable_mask,
+                        struct acpi_gpe_register_info *gpe_register_info);
+
 /******************************************************************************
  *
  * FUNCTION:   acpi_hw_get_gpe_register_bit
@@ -146,7 +150,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
 
        status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
        if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
-               gpe_register_info->enable_mask = enable_mask;
+               gpe_register_info->enable_mask = (u8)enable_mask;
        }
        return (status);
 }
@@ -221,7 +225,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
 
        /* GPE currently handled? */
 
-       if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+       if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
            ACPI_GPE_DISPATCH_NONE) {
                local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
        }
index 6aade8e..c5214de 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a4c34d2..3cf77af 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d590693..7d21cae 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 76ab5c1..675c709 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6b91912..2bd33fe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 96d007d..5f97468 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6921c7f..3b37676 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f1249e3..24fa19a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 607eb9e..e107f92 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 80fcfc8..5d347a7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b55642c..1a8b39c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3d88ef4..80f097e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 42d3710..7dc367e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e634a05..7bcc68f 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a3fb7e4..4a85c45 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7c9d018..bd6cd4a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7eee0a6..d293d97 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a42ee9d..677bc93 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e83cff3..c95a119 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 392910f..0eb5431 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b13b92..8b79958 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7e417aa..151fcd9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b09e6be..c30672d 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index af1cc42..4a9d4a6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4a5e3f5..6ad0200 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4758a1f..c68609a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4bd558b..b6030a2 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8c6c11c..d66c326 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dae9401..7933835 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsxfobj")
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_get_id
- *
- * PARAMETERS:  Handle          - Handle of object whose id is desired
- *              ret_id          - Where the id will be placed
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine returns the owner id associated with a handle
- *
- ******************************************************************************/
-acpi_status acpi_get_id(acpi_handle handle, acpi_owner_id * ret_id)
-{
-       struct acpi_namespace_node *node;
-       acpi_status status;
-
-       /* Parameter Validation */
-
-       if (!ret_id) {
-               return (AE_BAD_PARAMETER);
-       }
-
-       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-       if (ACPI_FAILURE(status)) {
-               return (status);
-       }
-
-       /* Convert and validate the handle */
-
-       node = acpi_ns_validate_handle(handle);
-       if (!node) {
-               (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-               return (AE_BAD_PARAMETER);
-       }
-
-       *ret_id = node->owner_id;
-
-       status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-       return (status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_get_id)
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_type
index 314d314..6d03877 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b058e23..9043722 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a688507..2f5ddd8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1755d2a..1af4a40 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0d8d37f..e18e7c4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6d27b59..a555f7f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 32d250f..9d669cc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0b64181..89984f3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3cd4880..960505a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9cb07e1..ba5f691 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e135aca..841a5ea 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 916fd09..66d406e 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,7 +74,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address16[5] = {
         * Address Translation Offset
         * Address Length
         */
-       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.granularity),
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.address16.address.granularity),
         AML_OFFSET(address16.granularity),
         5},
 
@@ -112,7 +112,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address32[5] = {
         * Address Translation Offset
         * Address Length
         */
-       {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.granularity),
+       {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.address32.address.granularity),
         AML_OFFSET(address32.granularity),
         5},
 
@@ -150,7 +150,7 @@ struct acpi_rsconvert_info acpi_rs_convert_address64[5] = {
         * Address Translation Offset
         * Address Length
         */
-       {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.granularity),
+       {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.address64.address.granularity),
         AML_OFFSET(address64.granularity),
         5},
 
@@ -194,7 +194,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
         * Address Length
         * Type-Specific Attribute
         */
-       {ACPI_RSC_MOVE64, ACPI_RS_OFFSET(data.ext_address64.granularity),
+       {ACPI_RSC_MOVE64,
+        ACPI_RS_OFFSET(data.ext_address64.address.granularity),
         AML_OFFSET(ext_address64.granularity),
         6}
 };
index 6895567..cb739a6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 049d9c2..15434e4 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c3c56b5..1539394 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2f9332d..b29d9ec 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -183,15 +183,15 @@ struct acpi_rsdump_info acpi_rs_dump_address16[8] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address16),
         "16-Bit WORD Address Space", NULL},
        {ACPI_RSD_ADDRESS, 0, NULL, NULL},
-       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.granularity), "Granularity",
-        NULL},
-       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.minimum), "Address Minimum",
-        NULL},
-       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.maximum), "Address Maximum",
-        NULL},
-       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.translation_offset),
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.granularity),
+        "Granularity", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.minimum),
+        "Address Minimum", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.maximum),
+        "Address Maximum", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.translation_offset),
         "Translation Offset", NULL},
-       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address_length),
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(address16.address.address_length),
         "Address Length", NULL},
        {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address16.resource_source), NULL, NULL}
 };
@@ -200,15 +200,15 @@ struct acpi_rsdump_info acpi_rs_dump_address32[8] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address32),
         "32-Bit DWORD Address Space", NULL},
        {ACPI_RSD_ADDRESS, 0, NULL, NULL},
-       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.granularity), "Granularity",
-        NULL},
-       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.minimum), "Address Minimum",
-        NULL},
-       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.maximum), "Address Maximum",
-        NULL},
-       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.translation_offset),
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.granularity),
+        "Granularity", NULL},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.minimum),
+        "Address Minimum", NULL},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.maximum),
+        "Address Maximum", NULL},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.translation_offset),
         "Translation Offset", NULL},
-       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address_length),
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(address32.address.address_length),
         "Address Length", NULL},
        {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address32.resource_source), NULL, NULL}
 };
@@ -217,15 +217,15 @@ struct acpi_rsdump_info acpi_rs_dump_address64[8] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_address64),
         "64-Bit QWORD Address Space", NULL},
        {ACPI_RSD_ADDRESS, 0, NULL, NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.granularity), "Granularity",
-        NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.minimum), "Address Minimum",
-        NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.maximum), "Address Maximum",
-        NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.translation_offset),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.granularity),
+        "Granularity", NULL},
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.minimum),
+        "Address Minimum", NULL},
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.maximum),
+        "Address Maximum", NULL},
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.translation_offset),
         "Translation Offset", NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address_length),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(address64.address.address_length),
         "Address Length", NULL},
        {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(address64.resource_source), NULL, NULL}
 };
@@ -234,15 +234,16 @@ struct acpi_rsdump_info acpi_rs_dump_ext_address64[8] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_ext_address64),
         "64-Bit Extended Address Space", NULL},
        {ACPI_RSD_ADDRESS, 0, NULL, NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.granularity),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.granularity),
         "Granularity", NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.minimum),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.minimum),
         "Address Minimum", NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.maximum),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.maximum),
         "Address Maximum", NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.translation_offset),
+       {ACPI_RSD_UINT64,
+        ACPI_RSD_OFFSET(ext_address64.address.translation_offset),
         "Translation Offset", NULL},
-       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address_length),
+       {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.address.address_length),
         "Address Length", NULL},
        {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(ext_address64.type_specific),
         "Type-Specific Attribute", NULL}
index 9d3f8a9..edecfc6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 19d6487..5adba01 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3461f7d..07cfa70 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7729129..50d5be2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index eab4483..c6b8086 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 41eea4b..1fe49d2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9e84072..4c8c6fe 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 897a5ce..ece3cd6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 877ab92..8e6276d 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -60,11 +60,11 @@ ACPI_MODULE_NAME("rsxface")
        ACPI_COPY_FIELD(out, in, min_address_fixed);         \
        ACPI_COPY_FIELD(out, in, max_address_fixed);         \
        ACPI_COPY_FIELD(out, in, info);                      \
-       ACPI_COPY_FIELD(out, in, granularity);               \
-       ACPI_COPY_FIELD(out, in, minimum);                   \
-       ACPI_COPY_FIELD(out, in, maximum);                   \
-       ACPI_COPY_FIELD(out, in, translation_offset);        \
-       ACPI_COPY_FIELD(out, in, address_length);            \
+       ACPI_COPY_FIELD(out, in, address.granularity);       \
+       ACPI_COPY_FIELD(out, in, address.minimum);           \
+       ACPI_COPY_FIELD(out, in, address.maximum);           \
+       ACPI_COPY_FIELD(out, in, address.translation_offset); \
+       ACPI_COPY_FIELD(out, in, address.address_length);    \
        ACPI_COPY_FIELD(out, in, resource_source);
 /* Local prototypes */
 static acpi_status
index f499c10..6a14495 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 41519a9..7d24860 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cb94770..0b879fc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 755b90c..9bad45e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index df3bb20..ef16c06 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6b1ca99..6559a58 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6482b0d..60e94f8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -263,45 +263,6 @@ acpi_get_table_header(char *signature,
 
 ACPI_EXPORT_SYMBOL(acpi_get_table_header)
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_unload_table_id
- *
- * PARAMETERS:  id            - Owner ID of the table to be removed.
- *
- * RETURN:      Status
- *
- * DESCRIPTION: This routine is used to force the unload of a table (by id)
- *
- ******************************************************************************/
-acpi_status acpi_unload_table_id(acpi_owner_id id)
-{
-       int i;
-       acpi_status status = AE_NOT_EXIST;
-
-       ACPI_FUNCTION_TRACE(acpi_unload_table_id);
-
-       /* Find table in the global table list */
-       for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
-               if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
-                       continue;
-               }
-               /*
-                * Delete all namespace objects owned by this table. Note that these
-                * objects can appear anywhere in the namespace by virtue of the AML
-                * "Scope" operator. Thus, we need to track ownership by an ID, not
-                * simply a position within the hierarchy
-                */
-               acpi_tb_delete_namespace_by_owner(i);
-               status = acpi_tb_release_owner_id(i);
-               acpi_tb_set_table_loaded_flag(i, FALSE);
-               break;
-       }
-       return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_get_table_with_size
index ab5308b..aadb300 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 43a54af..eac52cf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a1acec9..1279f50 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index efac83c..61d8f6d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 038ea88..242bd07 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 78fde0a..eacc5ee 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ff601c0..c37ec50 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e516254..57078e3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 40e923e..988e23b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a3516de..71fce38 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8e544d4..9ef80f2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8fed148..6c738fa 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0403dca..743a0ae 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4e263a8..7e1168b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 77ceac7..5e8df91 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9afa944..aa44827 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4b12880..27431cf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 77120ec..e402e07 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dc6e965..089f78b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d44dee6..f9ff100 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2e2bb14..56bbacd 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 82717ff..37b8b58 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dfa9009..7d83efe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 685766f..574cd31 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 36bec57..2959217 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index db30caf..29e4499 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0ce3f5a..82ca914 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bc1ff82..b3505db 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1cc97a7..8274cc1 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6dc54b3..83b6c52 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7d0ee96..130dd9f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4dc3313..c6149a2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 49c873c..0929187 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 88ef77f..306e785 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b1fd688..083a768 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2a0f9e0..f2606af 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c0d44d3..735db11 100644 (file)
@@ -1027,7 +1027,6 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
 
 static struct dev_pm_domain acpi_general_pm_domain = {
        .ops = {
-#ifdef CONFIG_PM
                .runtime_suspend = acpi_subsys_runtime_suspend,
                .runtime_resume = acpi_subsys_runtime_resume,
 #ifdef CONFIG_PM_SLEEP
@@ -1040,7 +1039,6 @@ static struct dev_pm_domain acpi_general_pm_domain = {
                .poweroff = acpi_subsys_suspend,
                .poweroff_late = acpi_subsys_suspend_late,
                .restore_early = acpi_subsys_resume_early,
-#endif
 #endif
        },
 };
index 1b5853f..14d0c89 100644 (file)
@@ -1,8 +1,8 @@
 /*
- *  ec.c - ACPI Embedded Controller Driver (v2.2)
+ *  ec.c - ACPI Embedded Controller Driver (v3)
  *
- *  Copyright (C) 2001-2014 Intel Corporation
- *    Author: 2014       Lv Zheng <lv.zheng@intel.com>
+ *  Copyright (C) 2001-2015 Intel Corporation
+ *    Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
  *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
  *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
  *            2004       Luming Yu <luming.yu@intel.com>
@@ -31,6 +31,7 @@
 
 /* Uncomment next line to get verbose printout */
 /* #define DEBUG */
+#define DEBUG_REF 0
 #define pr_fmt(fmt) "ACPI : EC: " fmt
 
 #include <linux/kernel.h>
@@ -71,20 +72,32 @@ enum ec_command {
 #define ACPI_EC_DELAY          500     /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY     550     /* Wait 550us for MSI EC */
+#define ACPI_EC_UDELAY_POLL    1000    /* Wait 1ms for EC transaction polling */
 #define ACPI_EC_CLEAR_MAX      100     /* Maximum number of events to query
                                         * when trying to clear the EC */
 
 enum {
-       EC_FLAGS_QUERY_PENDING,         /* Query is pending */
-       EC_FLAGS_GPE_STORM,             /* GPE storm detected */
+       EC_FLAGS_EVENT_ENABLED,         /* Event is enabled */
+       EC_FLAGS_EVENT_PENDING,         /* Event is pending */
+       EC_FLAGS_EVENT_DETECTED,        /* Event is detected */
        EC_FLAGS_HANDLERS_INSTALLED,    /* Handlers for GPE and
                                         * OpReg are installed */
-       EC_FLAGS_BLOCKED,               /* Transactions are blocked */
+       EC_FLAGS_STARTED,               /* Driver is started */
+       EC_FLAGS_STOPPED,               /* Driver is stopped */
+       EC_FLAGS_COMMAND_STORM,         /* GPE storms occurred to the
+                                        * current command processing */
 };
 
 #define ACPI_EC_COMMAND_POLL           0x01 /* Available for command byte */
 #define ACPI_EC_COMMAND_COMPLETE       0x02 /* Completed last byte */
 
+#define ec_debug_ref(ec, fmt, ...)                                     \
+       do {                                                            \
+               if (DEBUG_REF)                                          \
+                       pr_debug("%lu: " fmt, ec->reference_count,      \
+                                ## __VA_ARGS__);                       \
+       } while (0)
+
 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
@@ -105,6 +118,7 @@ struct acpi_ec_query_handler {
        acpi_handle handle;
        void *data;
        u8 query_bit;
+       struct kref kref;
 };
 
 struct transaction {
@@ -117,8 +131,12 @@ struct transaction {
        u8 wlen;
        u8 rlen;
        u8 flags;
+       unsigned long timestamp;
 };
 
+static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
+static void advance_transaction(struct acpi_ec *ec);
+
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
 
@@ -129,7 +147,28 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
 
 /* --------------------------------------------------------------------------
- *                           Transaction Management
+ *                           Device Flags
+ * -------------------------------------------------------------------------- */
+
+static bool acpi_ec_started(struct acpi_ec *ec)
+{
+       return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+              !test_bit(EC_FLAGS_STOPPED, &ec->flags);
+}
+
+static bool acpi_ec_flushed(struct acpi_ec *ec)
+{
+       return ec->reference_count == 1;
+}
+
+static bool acpi_ec_has_pending_event(struct acpi_ec *ec)
+{
+       return test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
+              test_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
+}
+
+/* --------------------------------------------------------------------------
+ *                           EC Registers
  * -------------------------------------------------------------------------- */
 
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
@@ -151,6 +190,7 @@ static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
 
+       ec->curr->timestamp = jiffies;
        pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
        return x;
 }
@@ -159,12 +199,14 @@ static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
        pr_debug("EC_SC(W) = 0x%2.2x\n", command);
        outb(command, ec->command_addr);
+       ec->curr->timestamp = jiffies;
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
        pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
        outb(data, ec->data_addr);
+       ec->curr->timestamp = jiffies;
 }
 
 #ifdef DEBUG
@@ -188,6 +230,203 @@ static const char *acpi_ec_cmd_string(u8 cmd)
 #define acpi_ec_cmd_string(cmd)                "UNDEF"
 #endif
 
+/* --------------------------------------------------------------------------
+ *                           GPE Registers
+ * -------------------------------------------------------------------------- */
+
+static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
+{
+       acpi_event_status gpe_status = 0;
+
+       (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
+       return (gpe_status & ACPI_EVENT_FLAG_SET) ? true : false;
+}
+
+static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
+{
+       if (open)
+               acpi_enable_gpe(NULL, ec->gpe);
+       else {
+               BUG_ON(ec->reference_count < 1);
+               acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+       }
+       if (acpi_ec_is_gpe_raised(ec)) {
+               /*
+                * On some platforms, EN=1 writes cannot trigger GPE. So
+                * software need to manually trigger a pseudo GPE event on
+                * EN=1 writes.
+                */
+               pr_debug("***** Polling quirk *****\n");
+               advance_transaction(ec);
+       }
+}
+
+static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
+{
+       if (close)
+               acpi_disable_gpe(NULL, ec->gpe);
+       else {
+               BUG_ON(ec->reference_count < 1);
+               acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+       }
+}
+
+static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
+{
+       /*
+        * GPE STS is a W1C register, which means:
+        * 1. Software can clear it without worrying about clearing other
+        *    GPEs' STS bits when the hardware sets them in parallel.
+        * 2. As long as software can ensure only clearing it when it is
+        *    set, hardware won't set it in parallel.
+        * So software can clear GPE in any contexts.
+        * Warning: do not move the check into advance_transaction() as the
+        * EC commands will be sent without GPE raised.
+        */
+       if (!acpi_ec_is_gpe_raised(ec))
+               return;
+       acpi_clear_gpe(NULL, ec->gpe);
+}
+
+/* --------------------------------------------------------------------------
+ *                           Transaction Management
+ * -------------------------------------------------------------------------- */
+
+static void acpi_ec_submit_request(struct acpi_ec *ec)
+{
+       ec->reference_count++;
+       if (ec->reference_count == 1)
+               acpi_ec_enable_gpe(ec, true);
+}
+
+static void acpi_ec_complete_request(struct acpi_ec *ec)
+{
+       bool flushed = false;
+
+       ec->reference_count--;
+       if (ec->reference_count == 0)
+               acpi_ec_disable_gpe(ec, true);
+       flushed = acpi_ec_flushed(ec);
+       if (flushed)
+               wake_up(&ec->wait);
+}
+
+static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+{
+       if (!test_bit(flag, &ec->flags)) {
+               acpi_ec_disable_gpe(ec, false);
+               pr_debug("+++++ Polling enabled +++++\n");
+               set_bit(flag, &ec->flags);
+       }
+}
+
+static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+{
+       if (test_bit(flag, &ec->flags)) {
+               clear_bit(flag, &ec->flags);
+               acpi_ec_enable_gpe(ec, false);
+               pr_debug("+++++ Polling disabled +++++\n");
+       }
+}
+
+/*
+ * acpi_ec_submit_flushable_request() - Increase the reference count unless
+ *                                      the flush operation is not in
+ *                                      progress
+ * @ec: the EC device
+ * @allow_event: whether event should be handled
+ *
+ * This function must be used before taking a new action that should hold
+ * the reference count.  If this function returns false, then the action
+ * must be discarded or it will prevent the flush operation from being
+ * completed.
+ *
+ * During flushing, QR_EC command need to pass this check when there is a
+ * pending event, so that the reference count held for the pending event
+ * can be decreased by the completion of the QR_EC command.
+ */
+static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec,
+                                            bool allow_event)
+{
+       if (!acpi_ec_started(ec)) {
+               if (!allow_event || !acpi_ec_has_pending_event(ec))
+                       return false;
+       }
+       acpi_ec_submit_request(ec);
+       return true;
+}
+
+static void acpi_ec_submit_event(struct acpi_ec *ec)
+{
+       if (!test_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags) ||
+           !test_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags))
+               return;
+       /* Hold reference for pending event */
+       if (!acpi_ec_submit_flushable_request(ec, true))
+               return;
+       ec_debug_ref(ec, "Increase event\n");
+       if (!test_and_set_bit(EC_FLAGS_EVENT_PENDING, &ec->flags)) {
+               pr_debug("***** Event query started *****\n");
+               schedule_work(&ec->work);
+               return;
+       }
+       acpi_ec_complete_request(ec);
+       ec_debug_ref(ec, "Decrease event\n");
+}
+
+static void acpi_ec_complete_event(struct acpi_ec *ec)
+{
+       if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+               clear_bit(EC_FLAGS_EVENT_PENDING, &ec->flags);
+               pr_debug("***** Event query stopped *****\n");
+               /* Unhold reference for pending event */
+               acpi_ec_complete_request(ec);
+               ec_debug_ref(ec, "Decrease event\n");
+               /* Check if there is another SCI_EVT detected */
+               acpi_ec_submit_event(ec);
+       }
+}
+
+static void acpi_ec_submit_detection(struct acpi_ec *ec)
+{
+       /* Hold reference for query submission */
+       if (!acpi_ec_submit_flushable_request(ec, false))
+               return;
+       ec_debug_ref(ec, "Increase query\n");
+       if (!test_and_set_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags)) {
+               pr_debug("***** Event detection blocked *****\n");
+               acpi_ec_submit_event(ec);
+               return;
+       }
+       acpi_ec_complete_request(ec);
+       ec_debug_ref(ec, "Decrease query\n");
+}
+
+static void acpi_ec_complete_detection(struct acpi_ec *ec)
+{
+       if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
+               clear_bit(EC_FLAGS_EVENT_DETECTED, &ec->flags);
+               pr_debug("***** Event detetion unblocked *****\n");
+               /* Unhold reference for query submission */
+               acpi_ec_complete_request(ec);
+               ec_debug_ref(ec, "Decrease query\n");
+       }
+}
+
+static void acpi_ec_enable_event(struct acpi_ec *ec)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       set_bit(EC_FLAGS_EVENT_ENABLED, &ec->flags);
+       /*
+        * An event may be pending even with SCI_EVT=0, so QR_EC should
+        * always be issued right after started.
+        */
+       acpi_ec_submit_detection(ec);
+       spin_unlock_irqrestore(&ec->lock, flags);
+}
+
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
        unsigned long flags;
@@ -200,7 +439,7 @@ static int ec_transaction_completed(struct acpi_ec *ec)
        return ret;
 }
 
-static bool advance_transaction(struct acpi_ec *ec)
+static void advance_transaction(struct acpi_ec *ec)
 {
        struct transaction *t;
        u8 status;
@@ -208,6 +447,12 @@ static bool advance_transaction(struct acpi_ec *ec)
 
        pr_debug("===== %s (%d) =====\n",
                 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
+       /*
+        * By always clearing STS before handling all indications, we can
+        * ensure a hardware STS 0->1 change after this clearing can always
+        * trigger a GPE interrupt.
+        */
+       acpi_ec_clear_gpe(ec);
        status = acpi_ec_read_status(ec);
        t = ec->curr;
        if (!t)
@@ -223,6 +468,7 @@ static bool advance_transaction(struct acpi_ec *ec)
                                t->rdata[t->ri++] = acpi_ec_read_data(ec);
                                if (t->rlen == t->ri) {
                                        t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                                       acpi_ec_complete_event(ec);
                                        if (t->command == ACPI_EC_COMMAND_QUERY)
                                                pr_debug("***** Command(%s) hardware completion *****\n",
                                                         acpi_ec_cmd_string(t->command));
@@ -233,25 +479,29 @@ static bool advance_transaction(struct acpi_ec *ec)
                } else if (t->wlen == t->wi &&
                           (status & ACPI_EC_FLAG_IBF) == 0) {
                        t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                       acpi_ec_complete_event(ec);
                        wakeup = true;
                }
-               return wakeup;
+               goto out;
        } else {
                if (EC_FLAGS_QUERY_HANDSHAKE &&
                    !(status & ACPI_EC_FLAG_SCI) &&
                    (t->command == ACPI_EC_COMMAND_QUERY)) {
                        t->flags |= ACPI_EC_COMMAND_POLL;
+                       acpi_ec_complete_detection(ec);
                        t->rdata[t->ri++] = 0x00;
                        t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                       acpi_ec_complete_event(ec);
                        pr_debug("***** Command(%s) software completion *****\n",
                                 acpi_ec_cmd_string(t->command));
                        wakeup = true;
                } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
                        acpi_ec_write_cmd(ec, t->command);
                        t->flags |= ACPI_EC_COMMAND_POLL;
+                       acpi_ec_complete_detection(ec);
                } else
                        goto err;
-               return wakeup;
+               goto out;
        }
 err:
        /*
@@ -259,28 +509,27 @@ err:
         * otherwise will take a not handled IRQ as a false one.
         */
        if (!(status & ACPI_EC_FLAG_SCI)) {
-               if (in_interrupt() && t)
-                       ++t->irq_count;
+               if (in_interrupt() && t) {
+                       if (t->irq_count < ec_storm_threshold)
+                               ++t->irq_count;
+                       /* Allow triggering on 0 threshold */
+                       if (t->irq_count == ec_storm_threshold)
+                               acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+               }
        }
-       return wakeup;
+out:
+       if (status & ACPI_EC_FLAG_SCI)
+               acpi_ec_submit_detection(ec);
+       if (wakeup && in_interrupt())
+               wake_up(&ec->wait);
 }
 
 static void start_transaction(struct acpi_ec *ec)
 {
        ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
        ec->curr->flags = 0;
-       (void)advance_transaction(ec);
-}
-
-static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
-
-static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
-{
-       if (state & ACPI_EC_FLAG_SCI) {
-               if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-                       return acpi_ec_sync_query(ec, NULL);
-       }
-       return 0;
+       ec->curr->timestamp = jiffies;
+       advance_transaction(ec);
 }
 
 static int ec_poll(struct acpi_ec *ec)
@@ -291,20 +540,25 @@ static int ec_poll(struct acpi_ec *ec)
        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
+               unsigned long usecs = ACPI_EC_UDELAY_POLL;
                do {
                        /* don't sleep with disabled interrupts */
                        if (EC_FLAGS_MSI || irqs_disabled()) {
-                               udelay(ACPI_EC_MSI_UDELAY);
+                               usecs = ACPI_EC_MSI_UDELAY;
+                               udelay(usecs);
                                if (ec_transaction_completed(ec))
                                        return 0;
                        } else {
                                if (wait_event_timeout(ec->wait,
                                                ec_transaction_completed(ec),
-                                               msecs_to_jiffies(1)))
+                                               usecs_to_jiffies(usecs)))
                                        return 0;
                        }
                        spin_lock_irqsave(&ec->lock, flags);
-                       (void)advance_transaction(ec);
+                       if (time_after(jiffies,
+                                       ec->curr->timestamp +
+                                       usecs_to_jiffies(usecs)))
+                               advance_transaction(ec);
                        spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
@@ -325,21 +579,29 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
                udelay(ACPI_EC_MSI_UDELAY);
        /* start transaction */
        spin_lock_irqsave(&ec->lock, tmp);
+       /* Enable GPE for command processing (IBF=0/OBF=1) */
+       if (!acpi_ec_submit_flushable_request(ec, true)) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+       ec_debug_ref(ec, "Increase command\n");
        /* following two actions should be kept atomic */
        ec->curr = t;
        pr_debug("***** Command(%s) started *****\n",
                 acpi_ec_cmd_string(t->command));
        start_transaction(ec);
-       if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
-               clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
-               pr_debug("***** Event stopped *****\n");
-       }
        spin_unlock_irqrestore(&ec->lock, tmp);
        ret = ec_poll(ec);
        spin_lock_irqsave(&ec->lock, tmp);
+       if (t->irq_count == ec_storm_threshold)
+               acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
        pr_debug("***** Command(%s) stopped *****\n",
                 acpi_ec_cmd_string(t->command));
        ec->curr = NULL;
+       /* Disable GPE for command processing (IBF=0/OBF=1) */
+       acpi_ec_complete_request(ec);
+       ec_debug_ref(ec, "Decrease command\n");
+unlock:
        spin_unlock_irqrestore(&ec->lock, tmp);
        return ret;
 }
@@ -354,10 +616,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
        if (t->rdata)
                memset(t->rdata, 0, t->rlen);
        mutex_lock(&ec->mutex);
-       if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
-               status = -EINVAL;
-               goto unlock;
-       }
        if (ec->global_lock) {
                status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
                if (ACPI_FAILURE(status)) {
@@ -365,26 +623,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                        goto unlock;
                }
        }
-       /* disable GPE during transaction if storm is detected */
-       if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-               /* It has to be disabled, so that it doesn't trigger. */
-               acpi_disable_gpe(NULL, ec->gpe);
-       }
 
        status = acpi_ec_transaction_unlocked(ec, t);
 
-       /* check if we received SCI during transaction */
-       ec_check_sci_sync(ec, acpi_ec_read_status(ec));
-       if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+       if (test_bit(EC_FLAGS_COMMAND_STORM, &ec->flags))
                msleep(1);
-               /* It is safe to enable the GPE outside of the transaction. */
-               acpi_enable_gpe(NULL, ec->gpe);
-       } else if (t->irq_count > ec_storm_threshold) {
-               pr_info("GPE storm detected(%d GPEs), "
-                       "transactions will use polling mode\n",
-                       t->irq_count);
-               set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
-       }
        if (ec->global_lock)
                acpi_release_global_lock(glk);
 unlock:
@@ -500,7 +743,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
        u8 value = 0;
 
        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-               status = acpi_ec_sync_query(ec, &value);
+               status = acpi_ec_query(ec, &value);
                if (status || !value)
                        break;
        }
@@ -511,6 +754,57 @@ static void acpi_ec_clear(struct acpi_ec *ec)
                pr_info("%d stale EC events cleared\n", i);
 }
 
+static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
+               pr_debug("+++++ Starting EC +++++\n");
+               /* Enable GPE for event processing (SCI_EVT=1) */
+               if (!resuming) {
+                       acpi_ec_submit_request(ec);
+                       ec_debug_ref(ec, "Increase driver\n");
+               }
+               pr_info("+++++ EC started +++++\n");
+       }
+       spin_unlock_irqrestore(&ec->lock, flags);
+}
+
+static bool acpi_ec_stopped(struct acpi_ec *ec)
+{
+       unsigned long flags;
+       bool flushed;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       flushed = acpi_ec_flushed(ec);
+       spin_unlock_irqrestore(&ec->lock, flags);
+       return flushed;
+}
+
+static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ec->lock, flags);
+       if (acpi_ec_started(ec)) {
+               pr_debug("+++++ Stopping EC +++++\n");
+               set_bit(EC_FLAGS_STOPPED, &ec->flags);
+               spin_unlock_irqrestore(&ec->lock, flags);
+               wait_event(ec->wait, acpi_ec_stopped(ec));
+               spin_lock_irqsave(&ec->lock, flags);
+               /* Disable GPE for event processing (SCI_EVT=1) */
+               if (!suspending) {
+                       acpi_ec_complete_request(ec);
+                       ec_debug_ref(ec, "Decrease driver\n");
+               }
+               clear_bit(EC_FLAGS_STARTED, &ec->flags);
+               clear_bit(EC_FLAGS_STOPPED, &ec->flags);
+               pr_info("+++++ EC stopped +++++\n");
+       }
+       spin_unlock_irqrestore(&ec->lock, flags);
+}
+
 void acpi_ec_block_transactions(void)
 {
        struct acpi_ec *ec = first_ec;
@@ -520,7 +814,7 @@ void acpi_ec_block_transactions(void)
 
        mutex_lock(&ec->mutex);
        /* Prevent transactions from being carried out */
-       set_bit(EC_FLAGS_BLOCKED, &ec->flags);
+       acpi_ec_stop(ec, true);
        mutex_unlock(&ec->mutex);
 }
 
@@ -531,14 +825,11 @@ void acpi_ec_unblock_transactions(void)
        if (!ec)
                return;
 
-       mutex_lock(&ec->mutex);
        /* Allow transactions to be carried out again */
-       clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+       acpi_ec_start(ec, true);
 
        if (EC_FLAGS_CLEAR_ON_RESUME)
                acpi_ec_clear(ec);
-
-       mutex_unlock(&ec->mutex);
 }
 
 void acpi_ec_unblock_transactions_early(void)
@@ -548,36 +839,33 @@ void acpi_ec_unblock_transactions_early(void)
         * atomic context during wakeup, so we don't need to acquire the mutex).
         */
        if (first_ec)
-               clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
+               acpi_ec_start(first_ec, true);
 }
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data)
+/* --------------------------------------------------------------------------
+                                Event Management
+   -------------------------------------------------------------------------- */
+static struct acpi_ec_query_handler *
+acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
 {
-       int result;
-       u8 d;
-       struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
-                               .wdata = NULL, .rdata = &d,
-                               .wlen = 0, .rlen = 1};
+       if (handler)
+               kref_get(&handler->kref);
+       return handler;
+}
 
-       if (!ec || !data)
-               return -EINVAL;
-       /*
-        * Query the EC to find out which _Qxx method we need to evaluate.
-        * Note that successful completion of the query causes the ACPI_EC_SCI
-        * bit to be cleared (and thus clearing the interrupt source).
-        */
-       result = acpi_ec_transaction_unlocked(ec, &t);
-       if (result)
-               return result;
-       if (!d)
-               return -ENODATA;
-       *data = d;
-       return 0;
+static void acpi_ec_query_handler_release(struct kref *kref)
+{
+       struct acpi_ec_query_handler *handler =
+               container_of(kref, struct acpi_ec_query_handler, kref);
+
+       kfree(handler);
+}
+
+static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
+{
+       kref_put(&handler->kref, acpi_ec_query_handler_release);
 }
 
-/* --------------------------------------------------------------------------
-                                Event Management
-   -------------------------------------------------------------------------- */
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
                              acpi_handle handle, acpi_ec_query_func func,
                              void *data)
@@ -593,6 +881,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
        handler->func = func;
        handler->data = data;
        mutex_lock(&ec->mutex);
+       kref_init(&handler->kref);
        list_add(&handler->node, &ec->list);
        mutex_unlock(&ec->mutex);
        return 0;
@@ -602,15 +891,18 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 {
        struct acpi_ec_query_handler *handler, *tmp;
+       LIST_HEAD(free_list);
 
        mutex_lock(&ec->mutex);
        list_for_each_entry_safe(handler, tmp, &ec->list, node) {
                if (query_bit == handler->query_bit) {
-                       list_del(&handler->node);
-                       kfree(handler);
+                       list_del_init(&handler->node);
+                       list_add(&handler->node, &free_list);
                }
        }
        mutex_unlock(&ec->mutex);
+       list_for_each_entry(handler, &free_list, node)
+               acpi_ec_put_query_handler(handler);
 }
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
@@ -626,59 +918,58 @@ static void acpi_ec_run(void *cxt)
        else if (handler->handle)
                acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
        pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
-       kfree(handler);
+       acpi_ec_put_query_handler(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
+static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
-       int status;
-       struct acpi_ec_query_handler *handler, *copy;
+       int result;
+       acpi_status status;
+       struct acpi_ec_query_handler *handler;
+       struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
+                               .wdata = NULL, .rdata = &value,
+                               .wlen = 0, .rlen = 1};
 
-       status = acpi_ec_query_unlocked(ec, &value);
+       /*
+        * Query the EC to find out which _Qxx method we need to evaluate.
+        * Note that successful completion of the query causes the ACPI_EC_SCI
+        * bit to be cleared (and thus clearing the interrupt source).
+        */
+       result = acpi_ec_transaction(ec, &t);
+       if (result)
+               return result;
        if (data)
                *data = value;
-       if (status)
-               return status;
+       if (!value)
+               return -ENODATA;
 
+       mutex_lock(&ec->mutex);
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
                        /* have custom handler for this bit */
-                       copy = kmalloc(sizeof(*handler), GFP_KERNEL);
-                       if (!copy)
-                               return -ENOMEM;
-                       memcpy(copy, handler, sizeof(*copy));
+                       handler = acpi_ec_get_query_handler(handler);
                        pr_debug("##### Query(0x%02x) scheduled #####\n",
                                 handler->query_bit);
-                       return acpi_os_execute((copy->func) ?
+                       status = acpi_os_execute((handler->func) ?
                                OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
-                               acpi_ec_run, copy);
+                               acpi_ec_run, handler);
+                       if (ACPI_FAILURE(status))
+                               result = -EBUSY;
+                       break;
                }
        }
-       return 0;
-}
-
-static void acpi_ec_gpe_query(void *ec_cxt)
-{
-       struct acpi_ec *ec = ec_cxt;
-
-       if (!ec)
-               return;
-       mutex_lock(&ec->mutex);
-       acpi_ec_sync_query(ec, NULL);
        mutex_unlock(&ec->mutex);
+       return result;
 }
 
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static void acpi_ec_gpe_poller(struct work_struct *work)
 {
-       if (state & ACPI_EC_FLAG_SCI) {
-               if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-                       pr_debug("***** Event started *****\n");
-                       return acpi_os_execute(OSL_NOTIFY_HANDLER,
-                               acpi_ec_gpe_query, ec);
-               }
-       }
-       return 0;
+       struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
+
+       pr_debug("***** Event poller started *****\n");
+       acpi_ec_query(ec, NULL);
+       pr_debug("***** Event poller stopped *****\n");
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -688,11 +979,9 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
        struct acpi_ec *ec = data;
 
        spin_lock_irqsave(&ec->lock, flags);
-       if (advance_transaction(ec))
-               wake_up(&ec->wait);
+       advance_transaction(ec);
        spin_unlock_irqrestore(&ec->lock, flags);
-       ec_check_sci(ec, acpi_ec_read_status(ec));
-       return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+       return ACPI_INTERRUPT_HANDLED;
 }
 
 /* --------------------------------------------------------------------------
@@ -750,11 +1039,11 @@ static struct acpi_ec *make_acpi_ec(void)
 
        if (!ec)
                return NULL;
-       ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
        mutex_init(&ec->mutex);
        init_waitqueue_head(&ec->wait);
        INIT_LIST_HEAD(&ec->list);
        spin_lock_init(&ec->lock);
+       INIT_WORK(&ec->work, acpi_ec_gpe_poller);
        return ec;
 }
 
@@ -810,13 +1099,13 @@ static int ec_install_handlers(struct acpi_ec *ec)
 
        if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
                return 0;
-       status = acpi_install_gpe_handler(NULL, ec->gpe,
+       status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
                                  ACPI_GPE_EDGE_TRIGGERED,
                                  &acpi_ec_gpe_handler, ec);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       acpi_enable_gpe(NULL, ec->gpe);
+       acpi_ec_start(ec, false);
        status = acpi_install_address_space_handler(ec->handle,
                                                    ACPI_ADR_SPACE_EC,
                                                    &acpi_ec_space_handler,
@@ -831,7 +1120,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
                        pr_err("Fail in evaluating the _REG object"
                                " of EC device. Broken bios is suspected.\n");
                } else {
-                       acpi_disable_gpe(NULL, ec->gpe);
+                       acpi_ec_stop(ec, false);
                        acpi_remove_gpe_handler(NULL, ec->gpe,
                                &acpi_ec_gpe_handler);
                        return -ENODEV;
@@ -846,7 +1135,7 @@ static void ec_remove_handlers(struct acpi_ec *ec)
 {
        if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
                return;
-       acpi_disable_gpe(NULL, ec->gpe);
+       acpi_ec_stop(ec, false);
        if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
                pr_err("failed to remove space handler\n");
@@ -900,14 +1189,11 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = ec_install_handlers(ec);
 
        /* EC is fully operational, allow queries */
-       clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+       acpi_ec_enable_event(ec);
 
        /* Clear stale _Q events if hardware might require that */
-       if (EC_FLAGS_CLEAR_ON_RESUME) {
-               mutex_lock(&ec->mutex);
+       if (EC_FLAGS_CLEAR_ON_RESUME)
                acpi_ec_clear(ec);
-               mutex_unlock(&ec->mutex);
-       }
        return ret;
 }
 
index 163e82f..56b321a 100644 (file)
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void);
 int acpi_sysfs_init(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(struct acpi_pci_root *root);
+int acpi_ioapic_remove(struct acpi_pci_root *root);
+#else
+static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
+static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
+#endif
 #ifdef CONFIG_ACPI_DOCK
 void register_dock_dependent_device(struct acpi_device *adev,
                                    acpi_handle dshandle);
@@ -68,6 +75,8 @@ static inline void acpi_debugfs_init(void) { return; }
 #endif
 void acpi_lpss_init(void);
 
+void acpi_apd_init(void);
+
 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
 bool acpi_queue_hotplug_work(struct work_struct *work);
 void acpi_device_hotplug(struct acpi_device *adev, u32 src);
@@ -122,11 +131,13 @@ struct acpi_ec {
        unsigned long data_addr;
        unsigned long global_lock;
        unsigned long flags;
+       unsigned long reference_count;
        struct mutex mutex;
        wait_queue_head_t wait;
        struct list_head list;
        struct transaction *curr;
        spinlock_t lock;
+       struct work_struct work;
 };
 
 extern struct acpi_ec *first_ec;
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
new file mode 100644 (file)
index 0000000..ccdc8db
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * IOAPIC/IOxAPIC/IOSAPIC driver
+ *
+ * Copyright (C) 2009 Fujitsu Limited.
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on original drivers/pci/ioapic.c
+ *     Yinghai Lu <yinghai@kernel.org>
+ *     Jiang Liu <jiang.liu@intel.com>
+ */
+
+/*
+ * This driver manages I/O APICs added by hotplug after boot.
+ * We try to claim all I/O APIC devices, but those present at boot were
+ * registered when we parsed the ACPI MADT.
+ */
+
+#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
+
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <acpi/acpi.h>
+
+struct acpi_pci_ioapic {
+       acpi_handle     root_handle;
+       acpi_handle     handle;
+       u32             gsi_base;
+       struct resource res;
+       struct pci_dev  *pdev;
+       struct list_head list;
+};
+
+static LIST_HEAD(ioapic_list);
+static DEFINE_MUTEX(ioapic_list_lock);
+
+static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
+{
+       struct resource *res = data;
+       struct resource_win win;
+
+       res->flags = 0;
+       if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
+               return AE_OK;
+
+       if (!acpi_dev_resource_memory(acpi_res, res)) {
+               if (acpi_dev_resource_address_space(acpi_res, &win) ||
+                   acpi_dev_resource_ext_address_space(acpi_res, &win))
+                       *res = win.res;
+       }
+       if ((res->flags & IORESOURCE_PREFETCH) ||
+           (res->flags & IORESOURCE_DISABLED))
+               res->flags = 0;
+
+       return AE_CTRL_TERMINATE;
+}
+
+static bool acpi_is_ioapic(acpi_handle handle, char **type)
+{
+       acpi_status status;
+       struct acpi_device_info *info;
+       char *hid = NULL;
+       bool match = false;
+
+       if (!acpi_has_method(handle, "_GSB"))
+               return false;
+
+       status = acpi_get_object_info(handle, &info);
+       if (ACPI_SUCCESS(status)) {
+               if (info->valid & ACPI_VALID_HID)
+                       hid = info->hardware_id.string;
+               if (hid) {
+                       if (strcmp(hid, "ACPI0009") == 0) {
+                               *type = "IOxAPIC";
+                               match = true;
+                       } else if (strcmp(hid, "ACPI000A") == 0) {
+                               *type = "IOAPIC";
+                               match = true;
+                       }
+               }
+               kfree(info);
+       }
+
+       return match;
+}
+
+static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
+                                    void *context, void **rv)
+{
+       acpi_status status;
+       unsigned long long gsi_base;
+       struct acpi_pci_ioapic *ioapic;
+       struct pci_dev *dev = NULL;
+       struct resource *res = NULL;
+       char *type = NULL;
+
+       if (!acpi_is_ioapic(handle, &type))
+               return AE_OK;
+
+       mutex_lock(&ioapic_list_lock);
+       list_for_each_entry(ioapic, &ioapic_list, list)
+               if (ioapic->handle == handle) {
+                       mutex_unlock(&ioapic_list_lock);
+                       return AE_OK;
+               }
+
+       status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
+       if (ACPI_FAILURE(status)) {
+               acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
+               goto exit;
+       }
+
+       ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
+       if (!ioapic) {
+               pr_err("cannot allocate memory for new IOAPIC\n");
+               goto exit;
+       } else {
+               ioapic->root_handle = (acpi_handle)context;
+               ioapic->handle = handle;
+               ioapic->gsi_base = (u32)gsi_base;
+               INIT_LIST_HEAD(&ioapic->list);
+       }
+
+       if (acpi_ioapic_registered(handle, (u32)gsi_base))
+               goto done;
+
+       dev = acpi_get_pci_dev(handle);
+       if (dev && pci_resource_len(dev, 0)) {
+               if (pci_enable_device(dev) < 0)
+                       goto exit_put;
+               pci_set_master(dev);
+               if (pci_request_region(dev, 0, type))
+                       goto exit_disable;
+               res = &dev->resource[0];
+               ioapic->pdev = dev;
+       } else {
+               pci_dev_put(dev);
+               dev = NULL;
+
+               res = &ioapic->res;
+               acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
+               if (res->flags == 0) {
+                       acpi_handle_warn(handle, "failed to get resource\n");
+                       goto exit_free;
+               } else if (request_resource(&iomem_resource, res)) {
+                       acpi_handle_warn(handle, "failed to insert resource\n");
+                       goto exit_free;
+               }
+       }
+
+       if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
+               acpi_handle_warn(handle, "failed to register IOAPIC\n");
+               goto exit_release;
+       }
+done:
+       list_add(&ioapic->list, &ioapic_list);
+       mutex_unlock(&ioapic_list_lock);
+
+       if (dev)
+               dev_info(&dev->dev, "%s at %pR, GSI %u\n",
+                        type, res, (u32)gsi_base);
+       else
+               acpi_handle_info(handle, "%s at %pR, GSI %u\n",
+                                type, res, (u32)gsi_base);
+
+       return AE_OK;
+
+exit_release:
+       if (dev)
+               pci_release_region(dev, 0);
+       else
+               release_resource(res);
+exit_disable:
+       if (dev)
+               pci_disable_device(dev);
+exit_put:
+       pci_dev_put(dev);
+exit_free:
+       kfree(ioapic);
+exit:
+       mutex_unlock(&ioapic_list_lock);
+       *(acpi_status *)rv = AE_ERROR;
+       return AE_OK;
+}
+
+int acpi_ioapic_add(struct acpi_pci_root *root)
+{
+       acpi_status status, retval = AE_OK;
+
+       status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
+                                    UINT_MAX, handle_ioapic_add, NULL,
+                                    root->device->handle, (void **)&retval);
+
+       return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+       int retval = 0;
+       struct acpi_pci_ioapic *ioapic, *tmp;
+
+       mutex_lock(&ioapic_list_lock);
+       list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+               if (root->device->handle != ioapic->root_handle)
+                       continue;
+
+               if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+                       retval = -EBUSY;
+
+               if (ioapic->pdev) {
+                       pci_release_region(ioapic->pdev, 0);
+                       pci_disable_device(ioapic->pdev);
+                       pci_dev_put(ioapic->pdev);
+               } else if (ioapic->res.flags && ioapic->res.parent) {
+                       release_resource(&ioapic->res);
+               }
+               list_del(&ioapic->list);
+               kfree(ioapic);
+       }
+       mutex_unlock(&ioapic_list_lock);
+
+       return retval;
+}
index 24b5476..1333cbd 100644 (file)
@@ -177,12 +177,7 @@ static int __init slit_valid(struct acpi_table_slit *slit)
 
 static int __init acpi_parse_slit(struct acpi_table_header *table)
 {
-       struct acpi_table_slit *slit;
-
-       if (!table)
-               return -EINVAL;
-
-       slit = (struct acpi_table_slit *)table;
+       struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
 
        if (!slit_valid(slit)) {
                printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
@@ -260,11 +255,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 
 static int __init acpi_parse_srat(struct acpi_table_header *table)
 {
-       struct acpi_table_srat *srat;
-       if (!table)
-               return -EINVAL;
+       struct acpi_table_srat *srat = (struct acpi_table_srat *)table;
 
-       srat = (struct acpi_table_srat *)table;
        acpi_srat_revision = srat->header.revision;
 
        /* Real work done in acpi_table_parse_srat below. */
index b1def41..e7f718d 100644 (file)
@@ -485,14 +485,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        if (!pin || !dev->irq_managed || dev->irq <= 0)
                return;
 
-       /* Keep IOAPIC pin configuration when suspending */
-       if (dev->dev.power.is_prepared)
-               return;
-#ifdef CONFIG_PM
-       if (dev->dev.power.runtime_status == RPM_SUSPENDING)
-               return;
-#endif
-
        entry = acpi_pci_irq_lookup(dev, pin);
        if (!entry)
                return;
@@ -513,5 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        if (gsi >= 0) {
                acpi_unregister_gsi(gsi);
                dev->irq_managed = 0;
+               dev->irq = 0;
        }
 }
index c6bcb8c..68a5f71 100644 (file)
@@ -112,10 +112,10 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
        if (ACPI_FAILURE(status))
                return AE_OK;
 
-       if ((address.address_length > 0) &&
+       if ((address.address.address_length > 0) &&
            (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
-               res->start = address.minimum;
-               res->end = address.minimum + address.address_length - 1;
+               res->start = address.address.minimum;
+               res->end = address.address.minimum + address.address.address_length - 1;
        }
 
        return AE_OK;
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
        if (hotadd) {
                pcibios_resource_survey_bus(root->bus);
                pci_assign_unassigned_root_bus_resources(root->bus);
+               acpi_ioapic_add(root);
        }
 
        pci_lock_rescan_remove();
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 
        pci_stop_root_bus(root->bus);
 
+       WARN_ON(acpi_ioapic_remove(root));
+
        device_set_run_wake(root->bus->bridge, false);
        pci_acpi_remove_bus_pm_notifier(device);
 
index 02e4839..7962651 100644 (file)
@@ -4,6 +4,10 @@
  *
  *     Alex Chiang <achiang@hp.com>
  *     - Unified x86/ia64 implementations
+ *
+ * I/O APIC hotplug support
+ *     Yinghai Lu <yinghai@kernel.org>
+ *     Jiang Liu <jiang.liu@intel.com>
  */
 #include <linux/export.h>
 #include <linux/acpi.h>
 #define _COMPONENT             ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_core");
 
+static struct acpi_table_madt *get_madt_table(void)
+{
+       static struct acpi_table_madt *madt;
+       static int read_madt;
+
+       if (!read_madt) {
+               if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
+                                       (struct acpi_table_header **)&madt)))
+                       madt = NULL;
+               read_madt++;
+       }
+
+       return madt;
+}
+
 static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
 {
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
 static int map_madt_entry(int type, u32 acpi_id)
 {
        unsigned long madt_end, entry;
-       static struct acpi_table_madt *madt;
-       static int read_madt;
        int phys_id = -1;       /* CPU hardware ID */
+       struct acpi_table_madt *madt;
 
-       if (!read_madt) {
-               if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
-                                       (struct acpi_table_header **)&madt)))
-                       madt = NULL;
-               read_madt++;
-       }
-
+       madt = get_madt_table();
        if (!madt)
                return phys_id;
 
@@ -203,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
        return acpi_map_cpuid(phys_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
+                        u64 *phys_addr, int *ioapic_id)
+{
+       struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
+
+       if (ioapic->global_irq_base != gsi_base)
+               return 0;
+
+       *phys_addr = ioapic->address;
+       *ioapic_id = ioapic->id;
+       return 1;
+}
+
+static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
+{
+       struct acpi_subtable_header *hdr;
+       unsigned long madt_end, entry;
+       struct acpi_table_madt *madt;
+       int apic_id = -1;
+
+       madt = get_madt_table();
+       if (!madt)
+               return apic_id;
+
+       entry = (unsigned long)madt;
+       madt_end = entry + madt->header.length;
+
+       /* Parse all entries looking for a match. */
+       entry += sizeof(struct acpi_table_madt);
+       while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
+               hdr = (struct acpi_subtable_header *)entry;
+               if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
+                   get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
+                       break;
+               else
+                       entry += hdr->length;
+       }
+
+       return apic_id;
+}
+
+static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
+                                 u64 *phys_addr)
+{
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct acpi_subtable_header *header;
+       union acpi_object *obj;
+       int apic_id = -1;
+
+       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+               goto exit;
+
+       if (!buffer.length || !buffer.pointer)
+               goto exit;
+
+       obj = buffer.pointer;
+       if (obj->type != ACPI_TYPE_BUFFER ||
+           obj->buffer.length < sizeof(struct acpi_subtable_header))
+               goto exit;
+
+       header = (struct acpi_subtable_header *)obj->buffer.pointer;
+       if (header->type == ACPI_MADT_TYPE_IO_APIC)
+               get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
+
+exit:
+       kfree(buffer.pointer);
+       return apic_id;
+}
+
+/**
+ * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
+ * @handle:    ACPI object for IOAPIC device
+ * @gsi_base:  GSI base to match with
+ * @phys_addr: Pointer to store physical address of matching IOAPIC record
+ *
+ * Walk resources returned by ACPI_MAT method, then ACPI MADT table, to search
+ * for an ACPI IOAPIC record matching @gsi_base.
+ * Return IOAPIC id and store physical address in @phys_addr if found a match,
+ * otherwise return <0.
+ */
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
+{
+       int apic_id;
+
+       apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
+       if (apic_id == -1)
+               apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
+
+       return apic_id;
+}
+#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
index 87b704e..c256bd7 100644 (file)
@@ -681,15 +681,13 @@ static int acpi_idle_bm_check(void)
 }
 
 /**
- * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
+ * acpi_idle_do_entry - enter idle state using the appropriate method
  * @cx: cstate data
  *
  * Caller disables interrupt before call and enables interrupt after return.
  */
-static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
+static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
-       /* Don't trace irqs off for idle */
-       stop_critical_timings();
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
@@ -703,37 +701,8 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                   gets asserted in time to freeze execution properly. */
                inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
-       start_critical_timings();
 }
 
-/**
- * acpi_idle_enter_c1 - enters an ACPI C1 state-type
- * @dev: the target CPU
- * @drv: cpuidle driver containing cpuidle state info
- * @index: index of target state
- *
- * This is equivalent to the HALT instruction.
- */
-static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
-{
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-       lapic_timer_state_broadcast(pr, cx, 1);
-       acpi_idle_do_entry(cx);
-
-       lapic_timer_state_broadcast(pr, cx, 0);
-
-       return index;
-}
-
-
 /**
  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
  * @dev: the target CPU
@@ -761,47 +730,11 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
        return 0;
 }
 
-/**
- * acpi_idle_enter_simple - enters an ACPI state without BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver with cpuidle state information
- * @index: the index of suggested state
- */
-static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
+static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-           !pr->flags.has_cst &&
-           !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-               return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-       /*
-        * Must be done before busmaster disable as we might need to
-        * access HPET !
-        */
-       lapic_timer_state_broadcast(pr, cx, 1);
-
-       if (cx->type == ACPI_STATE_C3)
-               ACPI_FLUSH_CPU_CACHE();
-
-       /* Tell the scheduler that we are going deep-idle: */
-       sched_clock_idle_sleep_event();
-       acpi_idle_do_entry(cx);
-
-       sched_clock_idle_wakeup_event(0);
-
-       lapic_timer_state_broadcast(pr, cx, 0);
-       return index;
+       return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
+               !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
+               !pr->flags.has_cst;
 }
 
 static int c3_cpu_count;
@@ -809,44 +742,14 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
- * @dev: the target CPU
- * @drv: cpuidle driver containing state data
- * @index: the index of suggested state
- *
- * If BM is detected, the deepest non-C3 idle state is entered instead.
+ * @pr: Target processor
+ * @cx: Target state context
  */
-static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-               struct cpuidle_driver *drv, int index)
+static void acpi_idle_enter_bm(struct acpi_processor *pr,
+                              struct acpi_processor_cx *cx)
 {
-       struct acpi_processor *pr;
-       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
-
-       pr = __this_cpu_read(processors);
-
-       if (unlikely(!pr))
-               return -EINVAL;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
-           !pr->flags.has_cst &&
-           !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
-               return acpi_idle_enter_c1(dev, drv, CPUIDLE_DRIVER_STATE_START);
-#endif
-
-       if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-               if (drv->safe_state_index >= 0) {
-                       return drv->states[drv->safe_state_index].enter(dev,
-                                               drv, drv->safe_state_index);
-               } else {
-                       acpi_safe_halt();
-                       return -EBUSY;
-               }
-       }
-
        acpi_unlazy_tlb(smp_processor_id());
 
-       /* Tell the scheduler that we are going deep-idle: */
-       sched_clock_idle_sleep_event();
        /*
         * Must be done before busmaster disable as we might need to
         * access HPET !
@@ -856,37 +759,71 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
-        * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
-       if (pr->flags.bm_check && pr->flags.bm_control) {
+       if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                raw_spin_unlock(&c3_lock);
-       } else if (!pr->flags.bm_check) {
-               ACPI_FLUSH_CPU_CACHE();
        }
 
        acpi_idle_do_entry(cx);
 
        /* Re-enable bus master arbitration */
-       if (pr->flags.bm_check && pr->flags.bm_control) {
+       if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }
 
-       sched_clock_idle_wakeup_event(0);
+       lapic_timer_state_broadcast(pr, cx, 0);
+}
+
+static int acpi_idle_enter(struct cpuidle_device *dev,
+                          struct cpuidle_driver *drv, int index)
+{
+       struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+       struct acpi_processor *pr;
+
+       pr = __this_cpu_read(processors);
+       if (unlikely(!pr))
+               return -EINVAL;
+
+       if (cx->type != ACPI_STATE_C1) {
+               if (acpi_idle_fallback_to_c1(pr)) {
+                       index = CPUIDLE_DRIVER_STATE_START;
+                       cx = per_cpu(acpi_cstate[index], dev->cpu);
+               } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
+                       if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
+                               acpi_idle_enter_bm(pr, cx);
+                               return index;
+                       } else if (drv->safe_state_index >= 0) {
+                               index = drv->safe_state_index;
+                               cx = per_cpu(acpi_cstate[index], dev->cpu);
+                       } else {
+                               acpi_safe_halt();
+                               return -EBUSY;
+                       }
+               }
+       }
+
+       lapic_timer_state_broadcast(pr, cx, 1);
+
+       if (cx->type == ACPI_STATE_C3)
+               ACPI_FLUSH_CPU_CACHE();
+
+       acpi_idle_do_entry(cx);
 
        lapic_timer_state_broadcast(pr, cx, 0);
+
        return index;
 }
 
@@ -981,27 +918,12 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;
+               state->enter = acpi_idle_enter;
 
                state->flags = 0;
-               switch (cx->type) {
-                       case ACPI_STATE_C1:
-
-                       state->enter = acpi_idle_enter_c1;
-                       state->enter_dead = acpi_idle_play_dead;
-                       drv->safe_state_index = count;
-                       break;
-
-                       case ACPI_STATE_C2:
-                       state->enter = acpi_idle_enter_simple;
+               if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
-                       break;
-
-                       case ACPI_STATE_C3:
-                       state->enter = pr->flags.bm_check ?
-                                       acpi_idle_enter_bm :
-                                       acpi_idle_enter_simple;
-                       break;
                }
 
                count++;
index 782a0d1..4752b99 100644 (file)
 #define valid_IRQ(i) (true)
 #endif
 
-static unsigned long acpi_dev_memresource_flags(u64 len, u8 write_protect,
-                                               bool window)
+static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
 {
-       unsigned long flags = IORESOURCE_MEM;
+       u64 reslen = end - start + 1;
 
-       if (len == 0)
-               flags |= IORESOURCE_DISABLED;
+       /*
+        * CHECKME: len might be required to check versus a minimum
+        * length as well. 1 for io is fine, but for memory it does
+        * not make any sense at all.
+        */
+       if (len && reslen && reslen == len && start <= end)
+               return true;
 
-       if (write_protect == ACPI_READ_WRITE_MEMORY)
-               flags |= IORESOURCE_MEM_WRITEABLE;
+       pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+               io ? "io" : "mem", start, end, len);
+
+       return false;
+}
+
+static void acpi_dev_memresource_flags(struct resource *res, u64 len,
+                                      u8 write_protect)
+{
+       res->flags = IORESOURCE_MEM;
 
-       if (window)
-               flags |= IORESOURCE_WINDOW;
+       if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
+               res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
-       return flags;
+       if (write_protect == ACPI_READ_WRITE_MEMORY)
+               res->flags |= IORESOURCE_MEM_WRITEABLE;
 }
 
 static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
@@ -56,7 +69,7 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
 {
        res->start = start;
        res->end = start + len - 1;
-       res->flags = acpi_dev_memresource_flags(len, write_protect, false);
+       acpi_dev_memresource_flags(res, len, write_protect);
 }
 
 /**
@@ -67,6 +80,11 @@ static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
  * Check if the given ACPI resource object represents a memory resource and
  * if that's the case, use the information in it to populate the generic
  * resource object pointed to by @res.
+ *
+ * Return:
+ * 1) false with res->flags setting to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
  */
 bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
 {
@@ -77,60 +95,52 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &ares->data.memory24;
-               if (!memory24->minimum && !memory24->address_length)
-                       return false;
-               acpi_dev_get_memresource(res, memory24->minimum,
-                                        memory24->address_length,
+               acpi_dev_get_memresource(res, memory24->minimum << 8,
+                                        memory24->address_length << 8,
                                         memory24->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &ares->data.memory32;
-               if (!memory32->minimum && !memory32->address_length)
-                       return false;
                acpi_dev_get_memresource(res, memory32->minimum,
                                         memory32->address_length,
                                         memory32->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &ares->data.fixed_memory32;
-               if (!fixed_memory32->address && !fixed_memory32->address_length)
-                       return false;
                acpi_dev_get_memresource(res, fixed_memory32->address,
                                         fixed_memory32->address_length,
                                         fixed_memory32->write_protect);
                break;
        default:
+               res->flags = 0;
                return false;
        }
-       return true;
+
+       return !(res->flags & IORESOURCE_DISABLED);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);
 
-static unsigned int acpi_dev_ioresource_flags(u64 start, u64 end, u8 io_decode,
-                                             bool window)
+static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
+                                     u8 io_decode)
 {
-       int flags = IORESOURCE_IO;
+       res->flags = IORESOURCE_IO;
 
-       if (io_decode == ACPI_DECODE_16)
-               flags |= IORESOURCE_IO_16BIT_ADDR;
+       if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
+               res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
-       if (start > end || end >= 0x10003)
-               flags |= IORESOURCE_DISABLED;
+       if (res->end >= 0x10003)
+               res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
 
-       if (window)
-               flags |= IORESOURCE_WINDOW;
-
-       return flags;
+       if (io_decode == ACPI_DECODE_16)
+               res->flags |= IORESOURCE_IO_16BIT_ADDR;
 }
 
 static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
                                    u8 io_decode)
 {
-       u64 end = start + len - 1;
-
        res->start = start;
-       res->end = end;
-       res->flags = acpi_dev_ioresource_flags(start, end, io_decode, false);
+       res->end = start + len - 1;
+       acpi_dev_ioresource_flags(res, len, io_decode);
 }
 
 /**
@@ -141,6 +151,11 @@ static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
  * Check if the given ACPI resource object represents an I/O resource and
  * if that's the case, use the information in it to populate the generic
  * resource object pointed to by @res.
+ *
+ * Return:
+ * 1) false with res->flags setting to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
  */
 bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
 {
@@ -150,135 +165,143 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_IO:
                io = &ares->data.io;
-               if (!io->minimum && !io->address_length)
-                       return false;
                acpi_dev_get_ioresource(res, io->minimum,
                                        io->address_length,
                                        io->io_decode);
                break;
        case ACPI_RESOURCE_TYPE_FIXED_IO:
                fixed_io = &ares->data.fixed_io;
-               if (!fixed_io->address && !fixed_io->address_length)
-                       return false;
                acpi_dev_get_ioresource(res, fixed_io->address,
                                        fixed_io->address_length,
                                        ACPI_DECODE_10);
                break;
        default:
+               res->flags = 0;
                return false;
        }
-       return true;
+
+       return !(res->flags & IORESOURCE_DISABLED);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
 
-/**
- * acpi_dev_resource_address_space - Extract ACPI address space information.
- * @ares: Input ACPI resource object.
- * @res: Output generic resource object.
- *
- * Check if the given ACPI resource object represents an address space resource
- * and if that's the case, use the information in it to populate the generic
- * resource object pointed to by @res.
- */
-bool acpi_dev_resource_address_space(struct acpi_resource *ares,
-                                    struct resource *res)
+static bool acpi_decode_space(struct resource_win *win,
+                             struct acpi_resource_address *addr,
+                             struct acpi_address64_attribute *attr)
 {
-       acpi_status status;
-       struct acpi_resource_address64 addr;
-       bool window;
-       u64 len;
-       u8 io_decode;
+       u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
+       bool wp = addr->info.mem.write_protect;
+       u64 len = attr->address_length;
+       struct resource *res = &win->res;
 
-       switch (ares->type) {
-       case ACPI_RESOURCE_TYPE_ADDRESS16:
-       case ACPI_RESOURCE_TYPE_ADDRESS32:
-       case ACPI_RESOURCE_TYPE_ADDRESS64:
-               break;
-       default:
-               return false;
-       }
+       /*
+        * Filter out invalid descriptor according to ACPI Spec 5.0, section
+        * 6.4.3.5 Address Space Resource Descriptors.
+        */
+       if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
+           (addr->min_address_fixed && addr->max_address_fixed && !len))
+               pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
+                        addr->min_address_fixed, addr->max_address_fixed, len);
 
-       status = acpi_resource_to_address64(ares, &addr);
-       if (ACPI_FAILURE(status))
-               return false;
+       res->start = attr->minimum;
+       res->end = attr->maximum;
 
-       res->start = addr.minimum;
-       res->end = addr.maximum;
-       window = addr.producer_consumer == ACPI_PRODUCER;
+       /*
+        * For bridges that translate addresses across the bridge,
+        * translation_offset is the offset that must be added to the
+        * address on the secondary side to obtain the address on the
+        * primary side. Non-bridge devices must list 0 for all Address
+        * Translation offset bits.
+        */
+       if (addr->producer_consumer == ACPI_PRODUCER) {
+               res->start += attr->translation_offset;
+               res->end += attr->translation_offset;
+       } else if (attr->translation_offset) {
+               pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
+                        attr->translation_offset);
+       }
 
-       switch(addr.resource_type) {
+       switch (addr->resource_type) {
        case ACPI_MEMORY_RANGE:
-               len = addr.maximum - addr.minimum + 1;
-               res->flags = acpi_dev_memresource_flags(len,
-                                               addr.info.mem.write_protect,
-                                               window);
+               acpi_dev_memresource_flags(res, len, wp);
                break;
        case ACPI_IO_RANGE:
-               io_decode = addr.granularity == 0xfff ?
-                               ACPI_DECODE_10 : ACPI_DECODE_16;
-               res->flags = acpi_dev_ioresource_flags(addr.minimum,
-                                                      addr.maximum,
-                                                      io_decode, window);
+               acpi_dev_ioresource_flags(res, len, iodec);
                break;
        case ACPI_BUS_NUMBER_RANGE:
                res->flags = IORESOURCE_BUS;
                break;
        default:
-               res->flags = 0;
+               return false;
        }
 
-       return true;
+       win->offset = attr->translation_offset;
+
+       if (addr->producer_consumer == ACPI_PRODUCER)
+               res->flags |= IORESOURCE_WINDOW;
+
+       if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
+               res->flags |= IORESOURCE_PREFETCH;
+
+       return !(res->flags & IORESOURCE_DISABLED);
+}
+
+/**
+ * acpi_dev_resource_address_space - Extract ACPI address space information.
+ * @ares: Input ACPI resource object.
+ * @win: Output generic resource object.
+ *
+ * Check if the given ACPI resource object represents an address space resource
+ * and if that's the case, use the information in it to populate the generic
+ * resource object pointed to by @win.
+ *
+ * Return:
+ * 1) false with win->res.flags setting to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
+ *    resource
+ * 3) true: valid assigned resource
+ */
+bool acpi_dev_resource_address_space(struct acpi_resource *ares,
+                                    struct resource_win *win)
+{
+       struct acpi_resource_address64 addr;
+
+       win->res.flags = 0;
+       if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
+               return false;
+
+       return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
+                                &addr.address);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);
 
 /**
  * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
  * @ares: Input ACPI resource object.
- * @res: Output generic resource object.
+ * @win: Output generic resource object.
  *
  * Check if the given ACPI resource object represents an extended address space
  * resource and if that's the case, use the information in it to populate the
- * generic resource object pointed to by @res.
+ * generic resource object pointed to by @win.
+ *
+ * Return:
+ * 1) false with win->res.flags setting to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
+ *    resource
+ * 3) true: valid assigned resource
  */
 bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
-                                        struct resource *res)
+                                        struct resource_win *win)
 {
        struct acpi_resource_extended_address64 *ext_addr;
-       bool window;
-       u64 len;
-       u8 io_decode;
 
+       win->res.flags = 0;
        if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
                return false;
 
        ext_addr = &ares->data.ext_address64;
 
-       res->start = ext_addr->minimum;
-       res->end = ext_addr->maximum;
-       window = ext_addr->producer_consumer == ACPI_PRODUCER;
-
-       switch(ext_addr->resource_type) {
-       case ACPI_MEMORY_RANGE:
-               len = ext_addr->maximum - ext_addr->minimum + 1;
-               res->flags = acpi_dev_memresource_flags(len,
-                                       ext_addr->info.mem.write_protect,
-                                       window);
-               break;
-       case ACPI_IO_RANGE:
-               io_decode = ext_addr->granularity == 0xfff ?
-                               ACPI_DECODE_10 : ACPI_DECODE_16;
-               res->flags = acpi_dev_ioresource_flags(ext_addr->minimum,
-                                                      ext_addr->maximum,
-                                                      io_decode, window);
-               break;
-       case ACPI_BUS_NUMBER_RANGE:
-               res->flags = IORESOURCE_BUS;
-               break;
-       default:
-               res->flags = 0;
-       }
-
-       return true;
+       return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
+                                &ext_addr->address);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
 
@@ -310,7 +333,7 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 {
        res->start = gsi;
        res->end = gsi;
-       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED;
+       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
@@ -369,6 +392,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
  * represented by the resource and populate the generic resource object pointed
  * to by @res accordingly.  If the registration of the GSI is not successful,
 * IORESOURCE_DISABLED will be set in that object's flags.
+ *
+ * Return:
+ * 1) false with res->flags setting to zero: not the expected resource type
+ * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
+ * 3) true: valid assigned resource
  */
 bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                                 struct resource *res)
@@ -402,6 +430,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                                         ext_irq->sharable, false);
                break;
        default:
+               res->flags = 0;
                return false;
        }
 
@@ -415,12 +444,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
  */
 void acpi_dev_free_resource_list(struct list_head *list)
 {
-       struct resource_list_entry *rentry, *re;
-
-       list_for_each_entry_safe(rentry, re, list, node) {
-               list_del(&rentry->node);
-               kfree(rentry);
-       }
+       resource_list_free(list);
 }
 EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);
 
@@ -432,18 +456,19 @@ struct res_proc_context {
        int error;
 };
 
-static acpi_status acpi_dev_new_resource_entry(struct resource *r,
+static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
                                               struct res_proc_context *c)
 {
-       struct resource_list_entry *rentry;
+       struct resource_entry *rentry;
 
-       rentry = kmalloc(sizeof(*rentry), GFP_KERNEL);
+       rentry = resource_list_create_entry(NULL, 0);
        if (!rentry) {
                c->error = -ENOMEM;
                return AE_NO_MEMORY;
        }
-       rentry->res = *r;
-       list_add_tail(&rentry->node, c->list);
+       *rentry->res = win->res;
+       rentry->offset = win->offset;
+       resource_list_add_tail(rentry, c->list);
        c->count++;
        return AE_OK;
 }
@@ -452,7 +477,8 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
                                             void *context)
 {
        struct res_proc_context *c = context;
-       struct resource r;
+       struct resource_win win;
+       struct resource *res = &win.res;
        int i;
 
        if (c->preproc) {
@@ -467,18 +493,18 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
                }
        }
 
-       memset(&r, 0, sizeof(r));
+       memset(&win, 0, sizeof(win));
 
-       if (acpi_dev_resource_memory(ares, &r)
-           || acpi_dev_resource_io(ares, &r)
-           || acpi_dev_resource_address_space(ares, &r)
-           || acpi_dev_resource_ext_address_space(ares, &r))
-               return acpi_dev_new_resource_entry(&r, c);
+       if (acpi_dev_resource_memory(ares, res)
+           || acpi_dev_resource_io(ares, res)
+           || acpi_dev_resource_address_space(ares, &win)
+           || acpi_dev_resource_ext_address_space(ares, &win))
+               return acpi_dev_new_resource_entry(&win, c);
 
-       for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++) {
+       for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
                acpi_status status;
 
-               status = acpi_dev_new_resource_entry(&r, c);
+               status = acpi_dev_new_resource_entry(&win, c);
                if (ACPI_FAILURE(status))
                        return status;
        }
@@ -503,7 +529,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
  * returned as the final error code.
  *
  * The resultant struct resource objects are put on the list pointed to by
- * @list, that must be empty initially, as members of struct resource_list_entry
+ * @list, that must be empty initially, as members of struct resource_entry
  * objects.  Callers of this routine should use %acpi_dev_free_resource_list() to
  * free that list.
  *
@@ -538,3 +564,58 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
        return c.count;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+
+/**
+ * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
+ *                                types
+ * @ares: Input ACPI resource object.
+ * @types: Valid resource types of IORESOURCE_XXX
+ *
+ * This is a helper function to support acpi_dev_get_resources(), which filters
+ * ACPI resource objects according to resource types.
+ */
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+                                 unsigned long types)
+{
+       unsigned long type = 0;
+
+       switch (ares->type) {
+       case ACPI_RESOURCE_TYPE_MEMORY24:
+       case ACPI_RESOURCE_TYPE_MEMORY32:
+       case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+               type = IORESOURCE_MEM;
+               break;
+       case ACPI_RESOURCE_TYPE_IO:
+       case ACPI_RESOURCE_TYPE_FIXED_IO:
+               type = IORESOURCE_IO;
+               break;
+       case ACPI_RESOURCE_TYPE_IRQ:
+       case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+               type = IORESOURCE_IRQ;
+               break;
+       case ACPI_RESOURCE_TYPE_DMA:
+       case ACPI_RESOURCE_TYPE_FIXED_DMA:
+               type = IORESOURCE_DMA;
+               break;
+       case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
+               type = IORESOURCE_REG;
+               break;
+       case ACPI_RESOURCE_TYPE_ADDRESS16:
+       case ACPI_RESOURCE_TYPE_ADDRESS32:
+       case ACPI_RESOURCE_TYPE_ADDRESS64:
+       case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
+               if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
+                       type = IORESOURCE_MEM;
+               else if (ares->data.address.resource_type == ACPI_IO_RANGE)
+                       type = IORESOURCE_IO;
+               else if (ares->data.address.resource_type ==
+                        ACPI_BUS_NUMBER_RANGE)
+                       type = IORESOURCE_BUS;
+               break;
+       default:
+               break;
+       }
+
+       return (type & types) ? 0 : 1;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
index dc4d896..bbca783 100644 (file)
@@ -2544,6 +2544,7 @@ int __init acpi_scan_init(void)
        acpi_pci_link_init();
        acpi_processor_init();
        acpi_lpss_init();
+       acpi_apd_init();
        acpi_cmos_rtc_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
index 8aa9254..7f251dd 100644 (file)
@@ -321,7 +321,7 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
        {},
 };
 
-static void acpi_sleep_dmi_check(void)
+static void __init acpi_sleep_dmi_check(void)
 {
        int year;
 
index 032db45..88a4f99 100644 (file)
@@ -522,6 +522,24 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
                },
        },
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1186097 */
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 3570R/370R/470R/450R/510R/4450RV",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "3570R/370R/470R/450R/510R/4450RV"),
+               },
+       },
+       {
+        /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 730U3E/740U3E",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
+               },
+       },
 
        {
         /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
index b0f1388..f32b802 100644 (file)
@@ -19,8 +19,8 @@
  * @dev: Device to handle.
  *
  * If power.subsys_data is NULL, point it to a new object, otherwise increment
- * its reference counter.  Return 1 if a new object has been created, otherwise
- * return 0 or error code.
+ * its reference counter.  Return 0 if new object has been created or refcount
+ * increased, otherwise negative error code.
  */
 int dev_pm_get_subsys_data(struct device *dev)
 {
@@ -56,13 +56,11 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
  * @dev: Device to handle.
  *
  * If the reference counter of power.subsys_data is zero after dropping the
- * reference, power.subsys_data is removed.  Return 1 if that happens or 0
- * otherwise.
+ * reference, power.subsys_data is removed.
  */
-int dev_pm_put_subsys_data(struct device *dev)
+void dev_pm_put_subsys_data(struct device *dev)
 {
        struct pm_subsys_data *psd;
-       int ret = 1;
 
        spin_lock_irq(&dev->power.lock);
 
@@ -70,18 +68,14 @@ int dev_pm_put_subsys_data(struct device *dev)
        if (!psd)
                goto out;
 
-       if (--psd->refcount == 0) {
+       if (--psd->refcount == 0)
                dev->power.subsys_data = NULL;
-       } else {
+       else
                psd = NULL;
-               ret = 0;
-       }
 
  out:
        spin_unlock_irq(&dev->power.lock);
        kfree(psd);
-
-       return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
 
index 0d8780c..ba4abbe 100644 (file)
@@ -344,14 +344,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
        struct device *dev;
 
        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
-
-       mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
-       if (!dev) {
-               mutex_unlock(&gpd_data->lock);
-               return NOTIFY_DONE;
-       }
-       mutex_unlock(&gpd_data->lock);
 
        for (;;) {
                struct generic_pm_domain *genpd;
@@ -1384,25 +1377,66 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
 
 #endif /* CONFIG_PM_SLEEP */
 
-static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+                                       struct generic_pm_domain *genpd,
+                                       struct gpd_timing_data *td)
 {
        struct generic_pm_domain_data *gpd_data;
+       int ret;
+
+       ret = dev_pm_get_subsys_data(dev);
+       if (ret)
+               return ERR_PTR(ret);
 
        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
-       if (!gpd_data)
-               return NULL;
+       if (!gpd_data) {
+               ret = -ENOMEM;
+               goto err_put;
+       }
+
+       if (td)
+               gpd_data->td = *td;
 
-       mutex_init(&gpd_data->lock);
+       gpd_data->base.dev = dev;
+       gpd_data->need_restore = -1;
+       gpd_data->td.constraint_changed = true;
+       gpd_data->td.effective_constraint_ns = -1;
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
-       dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+
+       spin_lock_irq(&dev->power.lock);
+
+       if (dev->power.subsys_data->domain_data) {
+               ret = -EINVAL;
+               goto err_free;
+       }
+
+       dev->power.subsys_data->domain_data = &gpd_data->base;
+       dev->pm_domain = &genpd->domain;
+
+       spin_unlock_irq(&dev->power.lock);
+
        return gpd_data;
+
+ err_free:
+       spin_unlock_irq(&dev->power.lock);
+       kfree(gpd_data);
+ err_put:
+       dev_pm_put_subsys_data(dev);
+       return ERR_PTR(ret);
 }
 
-static void __pm_genpd_free_dev_data(struct device *dev,
-                                    struct generic_pm_domain_data *gpd_data)
+static void genpd_free_dev_data(struct device *dev,
+                               struct generic_pm_domain_data *gpd_data)
 {
-       dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+       spin_lock_irq(&dev->power.lock);
+
+       dev->pm_domain = NULL;
+       dev->power.subsys_data->domain_data = NULL;
+
+       spin_unlock_irq(&dev->power.lock);
+
        kfree(gpd_data);
+       dev_pm_put_subsys_data(dev);
 }
 
 /**
@@ -1414,8 +1448,7 @@ static void __pm_genpd_free_dev_data(struct device *dev,
 int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
 {
-       struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
-       struct pm_domain_data *pdd;
+       struct generic_pm_domain_data *gpd_data;
        int ret = 0;
 
        dev_dbg(dev, "%s()\n", __func__);
@@ -1423,9 +1456,9 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;
 
-       gpd_data_new = __pm_genpd_alloc_dev_data(dev);
-       if (!gpd_data_new)
-               return -ENOMEM;
+       gpd_data = genpd_alloc_dev_data(dev, genpd, td);
+       if (IS_ERR(gpd_data))
+               return PTR_ERR(gpd_data);
 
        genpd_acquire_lock(genpd);
 
@@ -1434,50 +1467,22 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                goto out;
        }
 
-       list_for_each_entry(pdd, &genpd->dev_list, list_node)
-               if (pdd->dev == dev) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-       ret = dev_pm_get_subsys_data(dev);
+       ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
        if (ret)
                goto out;
 
        genpd->device_count++;
        genpd->max_off_time_changed = true;
 
-       spin_lock_irq(&dev->power.lock);
-
-       dev->pm_domain = &genpd->domain;
-       if (dev->power.subsys_data->domain_data) {
-               gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
-       } else {
-               gpd_data = gpd_data_new;
-               dev->power.subsys_data->domain_data = &gpd_data->base;
-       }
-       gpd_data->refcount++;
-       if (td)
-               gpd_data->td = *td;
-
-       spin_unlock_irq(&dev->power.lock);
-
-       if (genpd->attach_dev)
-               genpd->attach_dev(genpd, dev);
-
-       mutex_lock(&gpd_data->lock);
-       gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-       gpd_data->need_restore = -1;
-       gpd_data->td.constraint_changed = true;
-       gpd_data->td.effective_constraint_ns = -1;
-       mutex_unlock(&gpd_data->lock);
 
  out:
        genpd_release_lock(genpd);
 
-       if (gpd_data != gpd_data_new)
-               __pm_genpd_free_dev_data(dev, gpd_data_new);
+       if (ret)
+               genpd_free_dev_data(dev, gpd_data);
+       else
+               dev_pm_qos_add_notifier(dev, &gpd_data->nb);
 
        return ret;
 }
@@ -1504,7 +1509,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
-       bool remove = false;
        int ret = 0;
 
        dev_dbg(dev, "%s()\n", __func__);
@@ -1514,6 +1518,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
            ||  pd_to_genpd(dev->pm_domain) != genpd)
                return -EINVAL;
 
+       /* The above validation also means we have existing domain_data. */
+       pdd = dev->power.subsys_data->domain_data;
+       gpd_data = to_gpd_data(pdd);
+       dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+
        genpd_acquire_lock(genpd);
 
        if (genpd->prepared_count > 0) {
@@ -1527,57 +1536,21 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
        if (genpd->detach_dev)
                genpd->detach_dev(genpd, dev);
 
-       spin_lock_irq(&dev->power.lock);
-
-       dev->pm_domain = NULL;
-       pdd = dev->power.subsys_data->domain_data;
        list_del_init(&pdd->list_node);
-       gpd_data = to_gpd_data(pdd);
-       if (--gpd_data->refcount == 0) {
-               dev->power.subsys_data->domain_data = NULL;
-               remove = true;
-       }
-
-       spin_unlock_irq(&dev->power.lock);
-
-       mutex_lock(&gpd_data->lock);
-       pdd->dev = NULL;
-       mutex_unlock(&gpd_data->lock);
 
        genpd_release_lock(genpd);
 
-       dev_pm_put_subsys_data(dev);
-       if (remove)
-               __pm_genpd_free_dev_data(dev, gpd_data);
+       genpd_free_dev_data(dev, gpd_data);
 
        return 0;
 
  out:
        genpd_release_lock(genpd);
+       dev_pm_qos_add_notifier(dev, &gpd_data->nb);
 
        return ret;
 }
 
-/**
- * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "need restore" flag.
- */
-void pm_genpd_dev_need_restore(struct device *dev, bool val)
-{
-       struct pm_subsys_data *psd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->power.lock, flags);
-
-       psd = dev_to_psd(dev);
-       if (psd && psd->domain_data)
-               to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
-
-       spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
-
 /**
  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
  * @genpd: Master PM domain to add the subdomain to.
index 106c693..15bf299 100644 (file)
@@ -117,20 +117,20 @@ do {                                                                      \
 } while (0)
 
 /**
- * find_device_opp() - find device_opp struct using device pointer
+ * _find_device_opp() - find device_opp struct using device pointer
  * @dev:       device pointer used to lookup device OPPs
  *
  * Search list of device OPPs for one containing matching device. Does a RCU
  * reader operation to grab the pointer needed.
  *
- * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
  * -EINVAL based on type of error.
  *
  * Locking: This function must be called under rcu_read_lock(). device_opp
  * is a RCU protected pointer. This means that device_opp is valid as long
  * as we are under RCU lock.
  */
-static struct device_opp *find_device_opp(struct device *dev)
+static struct device_opp *_find_device_opp(struct device *dev)
 {
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
 
@@ -153,7 +153,7 @@ static struct device_opp *find_device_opp(struct device *dev)
  * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
  * @opp:       opp for which voltage has to be returned for
  *
- * Return voltage in micro volt corresponding to the opp, else
+ * Return: voltage in micro volt corresponding to the opp, else
  * return 0
  *
  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -169,6 +169,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
        struct dev_pm_opp *tmp_opp;
        unsigned long v = 0;
 
+       opp_rcu_lockdep_assert();
+
        tmp_opp = rcu_dereference(opp);
        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
@@ -183,7 +185,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  * @opp:       opp for which frequency has to be returned for
  *
- * Return frequency in hertz corresponding to the opp, else
+ * Return: frequency in hertz corresponding to the opp, else
  * return 0
  *
  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
@@ -199,6 +201,8 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
        struct dev_pm_opp *tmp_opp;
        unsigned long f = 0;
 
+       opp_rcu_lockdep_assert();
+
        tmp_opp = rcu_dereference(opp);
        if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
                pr_err("%s: Invalid parameters\n", __func__);
@@ -213,7 +217,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:       device for which we do this operation
  *
- * This function returns the number of available opps if there are any,
+ * Return: This function returns the number of available opps if there are any,
  * else returns 0 if none or the corresponding error value.
  *
  * Locking: This function takes rcu_read_lock().
@@ -226,7 +230,7 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 
        rcu_read_lock();
 
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                count = PTR_ERR(dev_opp);
                dev_err(dev, "%s: device OPP not found (%d)\n",
@@ -251,9 +255,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  * @freq:              frequency to search for
  * @available:         true/false - match for available opp
  *
- * Searches for exact match in the opp list and returns pointer to the matching
- * opp if found, else returns ERR_PTR in case of error and should be handled
- * using IS_ERR. Error return values can be:
+ * Return: Searches for exact match in the opp list and returns pointer to the
+ * matching opp if found, else returns ERR_PTR in case of error and should
+ * be handled using IS_ERR. Error return values can be:
  * EINVAL:     for bad pointer
  * ERANGE:     no match found for search
  * ENODEV:     if device not found in list of registered devices
@@ -280,7 +284,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 
        opp_rcu_lockdep_assert();
 
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
                dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
@@ -307,7 +311,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  * Search for the matching ceil *available* OPP from a starting freq
  * for a device.
  *
- * Returns matching *opp and refreshes *freq accordingly, else returns
+ * Return: matching *opp and refreshes *freq accordingly, else returns
  * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  * values can be:
  * EINVAL:     for bad pointer
@@ -333,7 +337,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                return ERR_PTR(-EINVAL);
        }
 
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp))
                return ERR_CAST(dev_opp);
 
@@ -357,7 +361,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  * Search for the matching floor *available* OPP from a starting freq
  * for a device.
  *
- * Returns matching *opp and refreshes *freq accordingly, else returns
+ * Return: matching *opp and refreshes *freq accordingly, else returns
  * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  * values can be:
  * EINVAL:     for bad pointer
@@ -383,7 +387,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                return ERR_PTR(-EINVAL);
        }
 
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp))
                return ERR_CAST(dev_opp);
 
@@ -403,7 +407,16 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
-static struct device_opp *add_device_opp(struct device *dev)
+/**
+ * _add_device_opp() - Allocate a new device OPP table
+ * @dev:       device for which we do this operation
+ *
+ * New device node which uses OPPs - used when multiple devices with OPP tables
+ * are maintained.
+ *
+ * Return: valid device_opp pointer if success, else NULL.
+ */
+static struct device_opp *_add_device_opp(struct device *dev)
 {
        struct device_opp *dev_opp;
 
@@ -424,8 +437,35 @@ static struct device_opp *add_device_opp(struct device *dev)
        return dev_opp;
 }
 
-static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
-                                 unsigned long u_volt, bool dynamic)
+/**
+ * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * @dev:       device for which we do this operation
+ * @freq:      Frequency in Hz for this OPP
+ * @u_volt:    Voltage in uVolts for this OPP
+ * @dynamic:   Dynamically added OPPs.
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
+ * freed by of_free_opp_table.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ */
+static int _opp_add_dynamic(struct device *dev, unsigned long freq,
+                           long u_volt, bool dynamic)
 {
        struct device_opp *dev_opp = NULL;
        struct dev_pm_opp *opp, *new_opp;
@@ -449,9 +489,9 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
        new_opp->dynamic = dynamic;
 
        /* Check for existing list for 'dev' */
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
-               dev_opp = add_device_opp(dev);
+               dev_opp = _add_device_opp(dev);
                if (!dev_opp) {
                        ret = -ENOMEM;
                        goto free_opp;
@@ -519,34 +559,53 @@ free_opp:
  * mutex cannot be locked.
  *
  * Return:
- * 0:          On success OR
+ * 0           On success OR
  *             Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST:    Freq are same and volt are different OR
+ * -EEXIST     Freq are same and volt are different OR
  *             Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM:    Memory allocation failure
+ * -ENOMEM     Memory allocation failure
  */
 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
-       return dev_pm_opp_add_dynamic(dev, freq, u_volt, true);
+       return _opp_add_dynamic(dev, freq, u_volt, true);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
-static void kfree_opp_rcu(struct rcu_head *head)
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head:      RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
 {
        struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
 
        kfree_rcu(opp, rcu_head);
 }
 
-static void kfree_device_rcu(struct rcu_head *head)
+/**
+ * _kfree_device_rcu() - Free device_opp RCU handler
+ * @head:      RCU head
+ */
+static void _kfree_device_rcu(struct rcu_head *head)
 {
        struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
 
        kfree_rcu(device_opp, rcu_head);
 }
 
-static void __dev_pm_opp_remove(struct device_opp *dev_opp,
-                               struct dev_pm_opp *opp)
+/**
+ * _opp_remove()  - Remove an OPP from a table definition
+ * @dev_opp:   points back to the device_opp struct this opp belongs to
+ * @opp:       pointer to the OPP to remove
+ *
+ * This function removes an opp definition from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * It is assumed that the caller holds required mutex for an RCU updater
+ * strategy.
+ */
+static void _opp_remove(struct device_opp *dev_opp,
+                       struct dev_pm_opp *opp)
 {
        /*
         * Notify the changes in the availability of the operable
@@ -554,12 +613,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
         */
        srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
        list_del_rcu(&opp->node);
-       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
+       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
        if (list_empty(&dev_opp->opp_list)) {
                list_del_rcu(&dev_opp->node);
                call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
-                         kfree_device_rcu);
+                         _kfree_device_rcu);
        }
 }
 
@@ -569,6 +628,12 @@ static void __dev_pm_opp_remove(struct device_opp *dev_opp,
  * @freq:      OPP to remove with matching 'freq'
  *
  * This function removes an opp from the opp list.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
  */
 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
@@ -579,7 +644,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
        /* Hold our list modification lock here */
        mutex_lock(&dev_opp_list_lock);
 
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp))
                goto unlock;
 
@@ -596,14 +661,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
                goto unlock;
        }
 
-       __dev_pm_opp_remove(dev_opp, opp);
+       _opp_remove(dev_opp, opp);
 unlock:
        mutex_unlock(&dev_opp_list_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
 /**
- * opp_set_availability() - helper to set the availability of an opp
+ * _opp_set_availability() - helper to set the availability of an opp
  * @dev:               device for which we do this operation
  * @freq:              OPP frequency to modify availability
  * @availability_req:  availability status requested for this opp
@@ -611,7 +676,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
  * share a common logic which is isolated here.
  *
- * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
  * copy operation, returns 0 if no modifcation was done OR modification was
  * successful.
  *
@@ -621,8 +686,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-static int opp_set_availability(struct device *dev, unsigned long freq,
-               bool availability_req)
+static int _opp_set_availability(struct device *dev, unsigned long freq,
+                                bool availability_req)
 {
        struct device_opp *dev_opp;
        struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
@@ -638,7 +703,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
        mutex_lock(&dev_opp_list_lock);
 
        /* Find the device_opp */
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                r = PTR_ERR(dev_opp);
                dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -668,7 +733,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
 
        list_replace_rcu(&opp->node, &new_opp->node);
        mutex_unlock(&dev_opp_list_lock);
-       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, kfree_opp_rcu);
+       call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
        /* Notify the change of the OPP availability */
        if (availability_req)
@@ -700,10 +765,14 @@ unlock:
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
-       return opp_set_availability(dev, freq, true);
+       return _opp_set_availability(dev, freq, true);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
 
@@ -722,26 +791,41 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  * integrity of the internal data structures. Callers should ensure that
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 {
-       return opp_set_availability(dev, freq, false);
+       return _opp_set_availability(dev, freq, false);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
 /**
  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  * @dev:       device pointer used to lookup device OPPs.
+ *
+ * Return: pointer to notifier head if found, otherwise -ENODEV or
+ * -EINVAL based on type of error, cast as a pointer. The value must be
+ * checked with IS_ERR to determine valid pointer or error result.
+ *
+ * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
+ * protected pointer. The reason for the same is that the opp pointer which is
+ * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * under the locked area. The pointer returned must be used prior to unlocking
+ * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
-       struct device_opp *dev_opp = find_device_opp(dev);
+       struct device_opp *dev_opp = _find_device_opp(dev);
 
        if (IS_ERR(dev_opp))
                return ERR_CAST(dev_opp); /* matching type */
 
        return &dev_opp->srcu_head;
 }
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
 #ifdef CONFIG_OF
 /**
@@ -749,6 +833,22 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  * @dev:       device pointer used to lookup device OPPs.
  *
  * Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0           On success OR
+ *             Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST     Freq are same and volt are different OR
+ *             Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM     Memory allocation failure
+ * -ENODEV     when 'operating-points' property is not found or is invalid data
+ *             in device node.
+ * -ENODATA    when empty 'operating-points' property is found
  */
 int of_init_opp_table(struct device *dev)
 {
@@ -777,7 +877,7 @@ int of_init_opp_table(struct device *dev)
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);
 
-               if (dev_pm_opp_add_dynamic(dev, freq, volt, false))
+               if (_opp_add_dynamic(dev, freq, volt, false))
                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
                                 __func__, freq);
                nr -= 2;
@@ -792,6 +892,12 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
  * @dev:       device pointer used to lookup device OPPs.
  *
  * Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
  */
 void of_free_opp_table(struct device *dev)
 {
@@ -799,7 +905,7 @@ void of_free_opp_table(struct device *dev)
        struct dev_pm_opp *opp, *tmp;
 
        /* Check for existing list for 'dev' */
-       dev_opp = find_device_opp(dev);
+       dev_opp = _find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int error = PTR_ERR(dev_opp);
                if (error != -ENODEV)
@@ -816,7 +922,7 @@ void of_free_opp_table(struct device *dev)
        /* Free static OPPs */
        list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
                if (!opp->dynamic)
-                       __dev_pm_opp_remove(dev_opp, opp);
+                       _opp_remove(dev_opp, opp);
        }
 
        mutex_unlock(&dev_opp_list_lock);
index a8fe4c1..e56d538 100644 (file)
@@ -64,6 +64,8 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
        struct pm_qos_flags *pqf;
        s32 val;
 
+       lockdep_assert_held(&dev->power.lock);
+
        if (IS_ERR_OR_NULL(qos))
                return PM_QOS_FLAGS_UNDEFINED;
 
@@ -104,6 +106,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
+       lockdep_assert_held(&dev->power.lock);
+
        return IS_ERR_OR_NULL(dev->power.qos) ?
                0 : pm_qos_read_value(&dev->power.qos->resume_latency);
 }
index 63fc7f0..2a04d34 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include <xen/balloon.h>
+#include <xen/grant_table.h>
 #include "common.h"
 
 /*
@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644);
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-/* Number of free pages to remove on each call to free_xenballooned_pages */
+/* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-               return alloc_xenballooned_pages(1, page, false);
+               return gnttab_alloc_pages(1, page);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
-                       free_xenballooned_pages(num_pages, page);
+                       gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
-               free_xenballooned_pages(num_pages, page);
+               gnttab_free_pages(num_pages, page);
 }
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
        atomic_dec(&blkif->persistent_gnt_in_use);
 }
 
+static void free_persistent_gnts_unmap_callback(int result,
+                                               struct gntab_unmap_queue_data *data)
+{
+       struct completion *c = data->data;
+
+       /* BUG_ON used to reproduce existing behaviour,
+          but is this the best way to deal with this? */
+       BUG_ON(result);
+       complete(c);
+}
+
 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                  unsigned int num)
 {
@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
-       int ret = 0;
        int segs_to_unmap = 0;
+       struct gntab_unmap_queue_data unmap_data;
+       struct completion unmap_completion;
+
+       init_completion(&unmap_completion);
+
+       unmap_data.data = &unmap_completion;
+       unmap_data.done = &free_persistent_gnts_unmap_callback;
+       unmap_data.pages = pages;
+       unmap_data.unmap_ops = unmap;
+       unmap_data.kunmap_ops = NULL;
 
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
-                       ret = gnttab_unmap_refs(unmap, NULL, pages,
-                               segs_to_unmap);
-                       BUG_ON(ret);
+
+                       unmap_data.count = segs_to_unmap;
+                       gnttab_unmap_refs_async(&unmap_data);
+                       wait_for_completion(&unmap_completion);
+
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif)
        shrink_free_pagepool(blkif, 0 /* All */);
 }
 
-/*
- * Unmap the grant references, and also remove the M2P over-rides
- * used in the 'pending_req'.
- */
-static void xen_blkbk_unmap(struct xen_blkif *blkif,
-                            struct grant_page *pages[],
-                            int num)
+static unsigned int xen_blkbk_unmap_prepare(
+       struct xen_blkif *blkif,
+       struct grant_page **pages,
+       unsigned int num,
+       struct gnttab_unmap_grant_ref *unmap_ops,
+       struct page **unmap_pages)
 {
-       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-       struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
-       int ret;
 
        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
@@ -674,21 +693,95 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
-               gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
+               gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
-               if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-                       ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
-                                               invcount);
+               invcount++;
+       }
+
+       return invcount;
+}
+
+static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
+{
+       struct pending_req* pending_req = (struct pending_req*) (data->data);
+       struct xen_blkif *blkif = pending_req->blkif;
+
+       /* BUG_ON used to reproduce existing behaviour,
+          but is this the best way to deal with this? */
+       BUG_ON(result);
+
+       put_free_pages(blkif, data->pages, data->count);
+       make_response(blkif, pending_req->id,
+                     pending_req->operation, pending_req->status);
+       free_req(blkif, pending_req);
+       /*
+        * Make sure the request is freed before releasing blkif,
+        * or there could be a race between free_req and the
+        * cleanup done in xen_blkif_free during shutdown.
+        *
+        * NB: The fact that we might try to wake up pending_free_wq
+        * before drain_complete (in case there's a drain going on)
+        * it's not a problem with our current implementation
+        * because we can assure there's no thread waiting on
+        * pending_free_wq if there's a drain going on, but it has
+        * to be taken into account if the current model is changed.
+        */
+       if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+               complete(&blkif->drain_complete);
+       }
+       xen_blkif_put(blkif);
+}
+
+static void xen_blkbk_unmap_and_respond(struct pending_req *req)
+{
+       struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
+       struct xen_blkif *blkif = req->blkif;
+       struct grant_page **pages = req->segments;
+       unsigned int invcount;
+
+       invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
+                                          req->unmap, req->unmap_pages);
+
+       work->data = req;
+       work->done = xen_blkbk_unmap_and_respond_callback;
+       work->unmap_ops = req->unmap;
+       work->kunmap_ops = NULL;
+       work->pages = req->unmap_pages;
+       work->count = invcount;
+
+       gnttab_unmap_refs_async(&req->gnttab_unmap_data);
+}
+
+
+/*
+ * Unmap the grant references.
+ *
+ * This could accumulate ops up to the batch size to reduce the number
+ * of hypercalls, but since this is only used in error paths there's
+ * no real need.
+ */
+static void xen_blkbk_unmap(struct xen_blkif *blkif,
+                            struct grant_page *pages[],
+                            int num)
+{
+       struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+       unsigned int invcount = 0;
+       int ret;
+
+       while (num) {
+               unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+               
+               invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
+                                                  unmap, unmap_pages);
+               if (invcount) {
+                       ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
-                       invcount = 0;
                }
-       }
-       if (invcount) {
-               ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
-               BUG_ON(ret);
-               put_free_pages(blkif, unmap_pages, invcount);
+               pages += batch;
+               num -= batch;
        }
 }
 
@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
-       if (atomic_dec_and_test(&pending_req->pendcnt)) {
-               struct xen_blkif *blkif = pending_req->blkif;
-
-               xen_blkbk_unmap(blkif,
-                               pending_req->segments,
-                               pending_req->nr_pages);
-               make_response(blkif, pending_req->id,
-                             pending_req->operation, pending_req->status);
-               free_req(blkif, pending_req);
-               /*
-                * Make sure the request is freed before releasing blkif,
-                * or there could be a race between free_req and the
-                * cleanup done in xen_blkif_free during shutdown.
-                *
-                * NB: The fact that we might try to wake up pending_free_wq
-                * before drain_complete (in case there's a drain going on)
-                * it's not a problem with our current implementation
-                * because we can assure there's no thread waiting on
-                * pending_free_wq if there's a drain going on, but it has
-                * to be taken into account if the current model is changed.
-                */
-               if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
-                       complete(&blkif->drain_complete);
-               }
-               xen_blkif_put(blkif);
-       }
+       if (atomic_dec_and_test(&pending_req->pendcnt))
+               xen_blkbk_unmap_and_respond(pending_req);
 }
 
 /*
index f65b807..cc90a84 100644 (file)
@@ -350,6 +350,9 @@ struct pending_req {
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
+       struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
+       struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
+       struct gntab_unmap_queue_data gnttab_unmap_data;
 };
 
 
index d5d4cd8..5c0baa9 100644 (file)
@@ -976,8 +976,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
        status = acpi_resource_to_address64(res, &addr);
 
        if (ACPI_SUCCESS(status)) {
-               hdp->hd_phys_address = addr.minimum;
-               hdp->hd_address = ioremap(addr.minimum, addr.address_length);
+               hdp->hd_phys_address = addr.address.minimum;
+               hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
 
                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
index 89ae88f..c59bdcb 100644 (file)
@@ -57,6 +57,16 @@ config X86_ACPI_CPUFREQ_CPB
          By enabling this option the acpi_cpufreq driver provides the old
          entry in addition to the new boost ones, for compatibility reasons.
 
+config X86_SFI_CPUFREQ
+       tristate "SFI Performance-States driver"
+       depends on X86_INTEL_MID && SFI
+       help
+         This adds a CPUFreq driver for some Silvermont based Intel Atom
+         architectures like Z34xx and Z35xx which enumerate processor
+         performance states through SFI.
+
+         If in doubt, say N.
+
 config ELAN_CPUFREQ
        tristate "AMD Elan SC400 and SC410"
        depends on MELAN
index b3ca7b0..8b4220a 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_X86_P4_CLOCKMOD)         += p4-clockmod.o
 obj-$(CONFIG_X86_CPUFREQ_NFORCE2)      += cpufreq-nforce2.o
 obj-$(CONFIG_X86_INTEL_PSTATE)         += intel_pstate.o
 obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY) += amd_freq_sensitivity.o
+obj-$(CONFIG_X86_SFI_CPUFREQ)          += sfi-cpufreq.o
 
 ##################################################################################
 # ARM SoC drivers
index fde97d6..bab67db 100644 (file)
@@ -320,8 +320,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 {
        struct private_data *priv = policy->driver_data;
 
-       if (priv->cdev)
-               cpufreq_cooling_unregister(priv->cdev);
+       cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        of_free_opp_table(priv->cpu_dev);
        clk_put(policy->clk);
index 46bed4f..28e59a4 100644 (file)
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
+#include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
+/* Macros to iterate over lists */
+/* Iterate over online CPUs policies */
+static LIST_HEAD(cpufreq_policy_list);
+#define for_each_policy(__policy)                              \
+       list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
+
+/* Iterate over governors */
+static LIST_HEAD(cpufreq_governor_list);
+#define for_each_governor(__governor)                          \
+       list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
+
 /**
  * The "cpufreq driver" - the arch- or hardware-dependent low
  * level driver of CPUFreq support, and its spinlock. This lock
@@ -40,7 +52,6 @@ static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
-static LIST_HEAD(cpufreq_policy_list);
 
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
@@ -62,7 +73,7 @@ static DECLARE_RWSEM(cpufreq_rwsem);
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
-static unsigned int __cpufreq_get(unsigned int cpu);
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
 static void handle_update(struct work_struct *work);
 
 /**
@@ -93,7 +104,6 @@ void disable_cpufreq(void)
 {
        off = 1;
 }
-static LIST_HEAD(cpufreq_governor_list);
 static DEFINE_MUTEX(cpufreq_governor_mutex);
 
 bool have_governor_per_policy(void)
@@ -202,7 +212,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;
 
-       if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+       if (cpu >= nr_cpu_ids)
                return NULL;
 
        if (!down_read_trylock(&cpufreq_rwsem))
@@ -229,9 +239,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
-       if (cpufreq_disabled())
-               return;
-
        kobject_put(&policy->kobj);
        up_read(&cpufreq_rwsem);
 }
@@ -249,12 +256,12 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
  * systems as each CPU might be scaled differently. So, use the arch
  * per-CPU loops_per_jiffy value wherever possible.
  */
-#ifndef CONFIG_SMP
-static unsigned long l_p_j_ref;
-static unsigned int l_p_j_ref_freq;
-
 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 {
+#ifndef CONFIG_SMP
+       static unsigned long l_p_j_ref;
+       static unsigned int l_p_j_ref_freq;
+
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;
 
@@ -270,13 +277,8 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
-}
-#else
-static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
-{
-       return;
-}
 #endif
+}
 
 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
@@ -432,11 +434,11 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
 }
 define_one_global_rw(boost);
 
-static struct cpufreq_governor *__find_governor(const char *str_governor)
+static struct cpufreq_governor *find_governor(const char *str_governor)
 {
        struct cpufreq_governor *t;
 
-       list_for_each_entry(t, &cpufreq_governor_list, governor_list)
+       for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;
 
@@ -463,12 +465,12 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
-       } else if (has_target()) {
+       } else {
                struct cpufreq_governor *t;
 
                mutex_lock(&cpufreq_governor_mutex);
 
-               t = __find_governor(str_governor);
+               t = find_governor(str_governor);
 
                if (t == NULL) {
                        int ret;
@@ -478,7 +480,7 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                        mutex_lock(&cpufreq_governor_mutex);
 
                        if (ret == 0)
-                               t = __find_governor(str_governor);
+                               t = find_governor(str_governor);
                }
 
                if (t != NULL) {
@@ -513,8 +515,7 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 
-static ssize_t show_scaling_cur_freq(
-       struct cpufreq_policy *policy, char *buf)
+static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 {
        ssize_t ret;
 
@@ -563,7 +564,7 @@ store_one(scaling_max_freq, max);
 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
 {
-       unsigned int cur_freq = __cpufreq_get(policy->cpu);
+       unsigned int cur_freq = __cpufreq_get(policy);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
@@ -639,7 +640,7 @@ static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                goto out;
        }
 
-       list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
+       for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
@@ -902,7 +903,7 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 
        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
-       while ((drv_attr) && (*drv_attr)) {
+       while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
@@ -936,7 +937,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        memcpy(&new_policy, policy, sizeof(*policy));
 
        /* Update governor of new_policy to the governor used before hotplug */
-       gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+       gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
        if (gov)
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
@@ -958,7 +959,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        }
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
                                  unsigned int cpu, struct device *dev)
 {
@@ -996,7 +996,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 
        return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 }
-#endif
 
 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 {
@@ -1033,6 +1032,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
+       init_completion(&policy->kobj_unregister);
+       INIT_WORK(&policy->update, handle_update);
 
        return policy;
 
@@ -1091,15 +1092,9 @@ static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
        }
 
        down_write(&policy->rwsem);
-
-       policy->last_cpu = policy->cpu;
        policy->cpu = cpu;
-
        up_write(&policy->rwsem);
 
-       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_UPDATE_POLICY_CPU, policy);
-
        return 0;
 }
 
@@ -1110,41 +1105,32 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        struct cpufreq_policy *policy;
        unsigned long flags;
        bool recover_policy = cpufreq_suspended;
-#ifdef CONFIG_HOTPLUG_CPU
-       struct cpufreq_policy *tpolicy;
-#endif
 
        if (cpu_is_offline(cpu))
                return 0;
 
        pr_debug("adding CPU %u\n", cpu);
 
-#ifdef CONFIG_SMP
        /* check whether a different CPU already registered this
         * CPU because it is in the same boat. */
-       policy = cpufreq_cpu_get(cpu);
-       if (unlikely(policy)) {
-               cpufreq_cpu_put(policy);
+       policy = cpufreq_cpu_get_raw(cpu);
+       if (unlikely(policy))
                return 0;
-       }
-#endif
 
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
-#ifdef CONFIG_HOTPLUG_CPU
        /* Check if this cpu was hot-unplugged earlier and has siblings */
        read_lock_irqsave(&cpufreq_driver_lock, flags);
-       list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
-               if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
+       for_each_policy(policy) {
+               if (cpumask_test_cpu(cpu, policy->related_cpus)) {
                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-                       ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
+                       ret = cpufreq_add_policy_cpu(policy, cpu, dev);
                        up_read(&cpufreq_rwsem);
                        return ret;
                }
        }
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
 
        /*
         * Restore the saved policy when doing light-weight init and fall back
@@ -1171,9 +1157,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-       init_completion(&policy->kobj_unregister);
-       INIT_WORK(&policy->update, handle_update);
-
        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
@@ -1371,11 +1354,10 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                        pr_err("%s: Failed to stop governor\n", __func__);
                        return ret;
                }
-       }
 
-       if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);
+       }
 
        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
@@ -1416,9 +1398,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        unsigned long flags;
        struct cpufreq_policy *policy;
 
-       read_lock_irqsave(&cpufreq_driver_lock, flags);
+       write_lock_irqsave(&cpufreq_driver_lock, flags);
        policy = per_cpu(cpufreq_cpu_data, cpu);
-       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+       per_cpu(cpufreq_cpu_data, cpu) = NULL;
+       write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        if (!policy) {
                pr_debug("%s: No cpu_data found\n", __func__);
@@ -1473,7 +1456,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                }
        }
 
-       per_cpu(cpufreq_cpu_data, cpu) = NULL;
        return 0;
 }
 
@@ -1510,30 +1492,23 @@ static void handle_update(struct work_struct *work)
 /**
  *     cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
  *     in deep trouble.
- *     @cpu: cpu number
- *     @old_freq: CPU frequency the kernel thinks the CPU runs at
+ *     @policy: policy managing CPUs
  *     @new_freq: CPU frequency the CPU actually runs at
  *
  *     We adjust to current frequency first, and need to clean up later.
  *     So either call to cpufreq_update_policy() or schedule handle_update()).
  */
-static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
+static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
                                unsigned int new_freq)
 {
-       struct cpufreq_policy *policy;
        struct cpufreq_freqs freqs;
-       unsigned long flags;
 
        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
-                old_freq, new_freq);
+                policy->cur, new_freq);
 
-       freqs.old = old_freq;
+       freqs.old = policy->cur;
        freqs.new = new_freq;
 
-       read_lock_irqsave(&cpufreq_driver_lock, flags);
-       policy = per_cpu(cpufreq_cpu_data, cpu);
-       read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
        cpufreq_freq_transition_begin(policy, &freqs);
        cpufreq_freq_transition_end(policy, &freqs, 0);
 }
@@ -1583,22 +1558,21 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_quick_get_max);
 
-static unsigned int __cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
        unsigned int ret_freq = 0;
 
        if (!cpufreq_driver->get)
                return ret_freq;
 
-       ret_freq = cpufreq_driver->get(cpu);
+       ret_freq = cpufreq_driver->get(policy->cpu);
 
        if (ret_freq && policy->cur &&
                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                /* verify no discrepancy between actual and
                                        saved value exists */
                if (unlikely(ret_freq != policy->cur)) {
-                       cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
+                       cpufreq_out_of_sync(policy, ret_freq);
                        schedule_work(&policy->update);
                }
        }
@@ -1619,7 +1593,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 
        if (policy) {
                down_read(&policy->rwsem);
-               ret_freq = __cpufreq_get(cpu);
+               ret_freq = __cpufreq_get(policy);
                up_read(&policy->rwsem);
 
                cpufreq_cpu_put(policy);
@@ -1682,7 +1656,7 @@ void cpufreq_suspend(void)
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
-       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+       for_each_policy(policy) {
                if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
                        pr_err("%s: Failed to stop governor for policy: %p\n",
                                __func__, policy);
@@ -1716,7 +1690,7 @@ void cpufreq_resume(void)
 
        pr_debug("%s: Resuming Governors\n", __func__);
 
-       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+       for_each_policy(policy) {
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
@@ -2006,10 +1980,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-/*
- * when "event" is CPUFREQ_GOV_LIMITS
- */
-
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                                        unsigned int event)
 {
@@ -2107,7 +2077,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 
        governor->initialized = 0;
        err = -EBUSY;
-       if (__find_governor(governor->name) == NULL) {
+       if (!find_governor(governor->name)) {
                err = 0;
                list_add(&governor->governor_list, &cpufreq_governor_list);
        }
@@ -2307,8 +2277,7 @@ int cpufreq_update_policy(unsigned int cpu)
                        policy->cur = new_policy.cur;
                } else {
                        if (policy->cur != new_policy.cur && has_target())
-                               cpufreq_out_of_sync(cpu, policy->cur,
-                                                               new_policy.cur);
+                               cpufreq_out_of_sync(policy, new_policy.cur);
                }
        }
 
@@ -2364,7 +2333,7 @@ static int cpufreq_boost_set_sw(int state)
        struct cpufreq_policy *policy;
        int ret = -EINVAL;
 
-       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+       for_each_policy(policy) {
                freq_table = cpufreq_frequency_get_table(policy->cpu);
                if (freq_table) {
                        ret = cpufreq_frequency_table_cpuinfo(policy,
@@ -2454,9 +2423,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        pr_debug("trying to register driver %s\n", driver_data->name);
 
-       if (driver_data->setpolicy)
-               driver_data->flags |= CPUFREQ_CONST_LOOPS;
-
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2465,6 +2431,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       if (driver_data->setpolicy)
+               driver_data->flags |= CPUFREQ_CONST_LOOPS;
+
        if (cpufreq_boost_supported()) {
                /*
                 * Check if driver provides function to enable boost -
@@ -2485,23 +2454,12 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        if (ret)
                goto err_boost_unreg;
 
-       if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
-               int i;
-               ret = -ENODEV;
-
-               /* check for at least one working CPU */
-               for (i = 0; i < nr_cpu_ids; i++)
-                       if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
-                               ret = 0;
-                               break;
-                       }
-
+       if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
+           list_empty(&cpufreq_policy_list)) {
                /* if all ->init() calls failed, unregister */
-               if (ret) {
-                       pr_debug("no CPU initialized for driver %s\n",
-                                driver_data->name);
-                       goto err_if_unreg;
-               }
+               pr_debug("%s: No CPU initialized for driver %s\n", __func__,
+                        driver_data->name);
+               goto err_if_unreg;
        }
 
        register_hotcpu_notifier(&cpufreq_cpu_notifier);
@@ -2556,6 +2514,14 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
+/*
+ * Stop cpufreq at shutdown to make sure it isn't holding any locks
+ * or mutexes when secondary CPUs are halted.
+ */
+static struct syscore_ops cpufreq_syscore_ops = {
+       .shutdown = cpufreq_suspend,
+};
+
 static int __init cpufreq_core_init(void)
 {
        if (cpufreq_disabled())
@@ -2564,6 +2530,8 @@ static int __init cpufreq_core_init(void)
        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);
 
+       register_syscore_ops(&cpufreq_syscore_ops);
+
        return 0;
 }
 core_initcall(cpufreq_core_init);
index 0cd9b4d..5e370a3 100644 (file)
@@ -18,7 +18,6 @@
 static spinlock_t cpufreq_stats_lock;
 
 struct cpufreq_stats {
-       unsigned int cpu;
        unsigned int total_trans;
        unsigned long long last_time;
        unsigned int max_state;
@@ -31,50 +30,33 @@ struct cpufreq_stats {
 #endif
 };
 
-static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
-
-struct cpufreq_stats_attribute {
-       struct attribute attr;
-       ssize_t(*show) (struct cpufreq_stats *, char *);
-};
-
-static int cpufreq_stats_update(unsigned int cpu)
+static int cpufreq_stats_update(struct cpufreq_stats *stats)
 {
-       struct cpufreq_stats *stat;
-       unsigned long long cur_time;
+       unsigned long long cur_time = get_jiffies_64();
 
-       cur_time = get_jiffies_64();
        spin_lock(&cpufreq_stats_lock);
-       stat = per_cpu(cpufreq_stats_table, cpu);
-       if (stat->time_in_state)
-               stat->time_in_state[stat->last_index] +=
-                       cur_time - stat->last_time;
-       stat->last_time = cur_time;
+       stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
+       stats->last_time = cur_time;
        spin_unlock(&cpufreq_stats_lock);
        return 0;
 }
 
 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
 {
-       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (!stat)
-               return 0;
-       return sprintf(buf, "%d\n",
-                       per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
+       return sprintf(buf, "%d\n", policy->stats->total_trans);
 }
 
 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 {
+       struct cpufreq_stats *stats = policy->stats;
        ssize_t len = 0;
        int i;
-       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (!stat)
-               return 0;
-       cpufreq_stats_update(stat->cpu);
-       for (i = 0; i < stat->state_num; i++) {
-               len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
+
+       cpufreq_stats_update(stats);
+       for (i = 0; i < stats->state_num; i++) {
+               len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
                        (unsigned long long)
-                       jiffies_64_to_clock_t(stat->time_in_state[i]));
+                       jiffies_64_to_clock_t(stats->time_in_state[i]));
        }
        return len;
 }
@@ -82,38 +64,35 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
+       struct cpufreq_stats *stats = policy->stats;
        ssize_t len = 0;
        int i, j;
 
-       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (!stat)
-               return 0;
-       cpufreq_stats_update(stat->cpu);
        len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
        len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
-       for (i = 0; i < stat->state_num; i++) {
+       for (i = 0; i < stats->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;
                len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
-                               stat->freq_table[i]);
+                               stats->freq_table[i]);
        }
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;
 
        len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 
-       for (i = 0; i < stat->state_num; i++) {
+       for (i = 0; i < stats->state_num; i++) {
                if (len >= PAGE_SIZE)
                        break;
 
                len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
-                               stat->freq_table[i]);
+                               stats->freq_table[i]);
 
-               for (j = 0; j < stat->state_num; j++) {
+               for (j = 0; j < stats->state_num; j++) {
                        if (len >= PAGE_SIZE)
                                break;
                        len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
-                                       stat->trans_table[i*stat->max_state+j]);
+                                       stats->trans_table[i*stats->max_state+j]);
                }
                if (len >= PAGE_SIZE)
                        break;
@@ -142,28 +121,29 @@ static struct attribute_group stats_attr_group = {
        .name = "stats"
 };
 
-static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
+static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
 {
        int index;
-       for (index = 0; index < stat->max_state; index++)
-               if (stat->freq_table[index] == freq)
+       for (index = 0; index < stats->max_state; index++)
+               if (stats->freq_table[index] == freq)
                        return index;
        return -1;
 }
 
 static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
 {
-       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
+       struct cpufreq_stats *stats = policy->stats;
 
-       if (!stat)
+       /* Already freed */
+       if (!stats)
                return;
 
-       pr_debug("%s: Free stat table\n", __func__);
+       pr_debug("%s: Free stats table\n", __func__);
 
        sysfs_remove_group(&policy->kobj, &stats_attr_group);
-       kfree(stat->time_in_state);
-       kfree(stat);
-       per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
+       kfree(stats->time_in_state);
+       kfree(stats);
+       policy->stats = NULL;
 }
 
 static void cpufreq_stats_free_table(unsigned int cpu)
@@ -174,37 +154,33 @@ static void cpufreq_stats_free_table(unsigned int cpu)
        if (!policy)
                return;
 
-       if (cpufreq_frequency_get_table(policy->cpu))
-               __cpufreq_stats_free_table(policy);
+       __cpufreq_stats_free_table(policy);
 
        cpufreq_cpu_put(policy);
 }
 
 static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
-       unsigned int i, count = 0, ret = 0;
-       struct cpufreq_stats *stat;
+       unsigned int i = 0, count = 0, ret = -ENOMEM;
+       struct cpufreq_stats *stats;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
        struct cpufreq_frequency_table *pos, *table;
 
+       /* We need cpufreq table for creating stats table */
        table = cpufreq_frequency_get_table(cpu);
        if (unlikely(!table))
                return 0;
 
-       if (per_cpu(cpufreq_stats_table, cpu))
-               return -EBUSY;
-       stat = kzalloc(sizeof(*stat), GFP_KERNEL);
-       if ((stat) == NULL)
-               return -ENOMEM;
-
-       ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
-       if (ret)
-               goto error_out;
+       /* stats already initialized */
+       if (policy->stats)
+               return -EEXIST;
 
-       stat->cpu = cpu;
-       per_cpu(cpufreq_stats_table, cpu) = stat;
+       stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+       if (!stats)
+               return -ENOMEM;
 
+       /* Find total allocation size */
        cpufreq_for_each_valid_entry(pos, table)
                count++;
 
@@ -213,32 +189,40 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        alloc_size += count * count * sizeof(int);
 #endif
-       stat->max_state = count;
-       stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
-       if (!stat->time_in_state) {
-               ret = -ENOMEM;
-               goto error_alloc;
-       }
-       stat->freq_table = (unsigned int *)(stat->time_in_state + count);
+
+       /* Allocate memory for time_in_state/freq_table/trans_table in one go */
+       stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+       if (!stats->time_in_state)
+               goto free_stat;
+
+       stats->freq_table = (unsigned int *)(stats->time_in_state + count);
 
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-       stat->trans_table = stat->freq_table + count;
+       stats->trans_table = stats->freq_table + count;
 #endif
-       i = 0;
+
+       stats->max_state = count;
+
+       /* Find valid-unique entries */
        cpufreq_for_each_valid_entry(pos, table)
-               if (freq_table_get_index(stat, pos->frequency) == -1)
-                       stat->freq_table[i++] = pos->frequency;
-       stat->state_num = i;
-       spin_lock(&cpufreq_stats_lock);
-       stat->last_time = get_jiffies_64();
-       stat->last_index = freq_table_get_index(stat, policy->cur);
-       spin_unlock(&cpufreq_stats_lock);
-       return 0;
-error_alloc:
-       sysfs_remove_group(&policy->kobj, &stats_attr_group);
-error_out:
-       kfree(stat);
-       per_cpu(cpufreq_stats_table, cpu) = NULL;
+               if (freq_table_get_index(stats, pos->frequency) == -1)
+                       stats->freq_table[i++] = pos->frequency;
+
+       stats->state_num = i;
+       stats->last_time = get_jiffies_64();
+       stats->last_index = freq_table_get_index(stats, policy->cur);
+
+       policy->stats = stats;
+       ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
+       if (!ret)
+               return 0;
+
+       /* We failed, release resources */
+       policy->stats = NULL;
+       kfree(stats->time_in_state);
+free_stat:
+       kfree(stats);
+
        return ret;
 }
 
@@ -259,30 +243,12 @@ static void cpufreq_stats_create_table(unsigned int cpu)
        cpufreq_cpu_put(policy);
 }
 
-static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
-{
-       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
-                       policy->last_cpu);
-
-       pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
-                       policy->cpu, policy->last_cpu);
-       per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
-                       policy->last_cpu);
-       per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
-       stat->cpu = policy->cpu;
-}
-
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
                unsigned long val, void *data)
 {
        int ret = 0;
        struct cpufreq_policy *policy = data;
 
-       if (val == CPUFREQ_UPDATE_POLICY_CPU) {
-               cpufreq_stats_update_policy_cpu(policy);
-               return 0;
-       }
-
        if (val == CPUFREQ_CREATE_POLICY)
                ret = __cpufreq_stats_create_table(policy);
        else if (val == CPUFREQ_REMOVE_POLICY)
@@ -295,35 +261,45 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
                unsigned long val, void *data)
 {
        struct cpufreq_freqs *freq = data;
-       struct cpufreq_stats *stat;
+       struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
+       struct cpufreq_stats *stats;
        int old_index, new_index;
 
-       if (val != CPUFREQ_POSTCHANGE)
+       if (!policy) {
+               pr_err("%s: No policy found\n", __func__);
                return 0;
+       }
 
-       stat = per_cpu(cpufreq_stats_table, freq->cpu);
-       if (!stat)
-               return 0;
+       if (val != CPUFREQ_POSTCHANGE)
+               goto put_policy;
 
-       old_index = stat->last_index;
-       new_index = freq_table_get_index(stat, freq->new);
+       if (!policy->stats) {
+               pr_debug("%s: No stats found\n", __func__);
+               goto put_policy;
+       }
 
-       /* We can't do stat->time_in_state[-1]= .. */
-       if (old_index == -1 || new_index == -1)
-               return 0;
+       stats = policy->stats;
+
+       old_index = stats->last_index;
+       new_index = freq_table_get_index(stats, freq->new);
 
-       cpufreq_stats_update(freq->cpu);
+       /* We can't do stats->time_in_state[-1]= .. */
+       if (old_index == -1 || new_index == -1)
+               goto put_policy;
 
        if (old_index == new_index)
-               return 0;
+               goto put_policy;
 
-       spin_lock(&cpufreq_stats_lock);
-       stat->last_index = new_index;
+       cpufreq_stats_update(stats);
+
+       stats->last_index = new_index;
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-       stat->trans_table[old_index * stat->max_state + new_index]++;
+       stats->trans_table[old_index * stats->max_state + new_index]++;
 #endif
-       stat->total_trans++;
-       spin_unlock(&cpufreq_stats_lock);
+       stats->total_trans++;
+
+put_policy:
+       cpufreq_cpu_put(policy);
        return 0;
 }
 
@@ -374,8 +350,7 @@ static void __exit cpufreq_stats_exit(void)
 }
 
 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
-MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
-                               "through sysfs filesystem");
+MODULE_DESCRIPTION("Export cpufreq stats via sysfs");
 MODULE_LICENSE("GPL");
 
 module_init(cpufreq_stats_init);
index 742eefb..872c577 100644 (file)
@@ -148,6 +148,8 @@ struct perf_limits {
        int32_t min_perf;
        int max_policy_pct;
        int max_sysfs_pct;
+       int min_policy_pct;
+       int min_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -159,6 +161,8 @@ static struct perf_limits limits = {
        .min_perf = 0,
        .max_policy_pct = 100,
        .max_sysfs_pct = 100,
+       .min_policy_pct = 0,
+       .min_sysfs_pct = 0,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -338,6 +342,33 @@ static void __init intel_pstate_debug_expose_params(void)
                return sprintf(buf, "%u\n", limits.object);             \
        }
 
+static ssize_t show_turbo_pct(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       struct cpudata *cpu;
+       int total, no_turbo, turbo_pct;
+       uint32_t turbo_fp;
+
+       cpu = all_cpu_data[0];
+
+       total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+       no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
+       turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+       turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
+       return sprintf(buf, "%u\n", turbo_pct);
+}
+
+static ssize_t show_num_pstates(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       struct cpudata *cpu;
+       int total;
+
+       cpu = all_cpu_data[0];
+       total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
+       return sprintf(buf, "%u\n", total);
+}
+
 static ssize_t show_no_turbo(struct kobject *kobj,
                             struct attribute *attr, char *buf)
 {
@@ -404,7 +435,9 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
-       limits.min_perf_pct = clamp_t(int, input, 0 , 100);
+
+       limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+       limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
        if (hwp_active)
@@ -418,11 +451,15 @@ show_one(min_perf_pct, min_perf_pct);
 define_one_global_rw(no_turbo);
 define_one_global_rw(max_perf_pct);
 define_one_global_rw(min_perf_pct);
+define_one_global_ro(turbo_pct);
+define_one_global_ro(num_pstates);
 
 static struct attribute *intel_pstate_attributes[] = {
        &no_turbo.attr,
        &max_perf_pct.attr,
        &min_perf_pct.attr,
+       &turbo_pct.attr,
+       &num_pstates.attr,
        NULL
 };
 
@@ -825,6 +862,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x46, core_params),
        ICPU(0x47, core_params),
        ICPU(0x4c, byt_params),
+       ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
        ICPU(0x56, core_params),
        {}
@@ -887,7 +925,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
-       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+       if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
+           policy->max >= policy->cpuinfo.max_freq) {
+               limits.min_policy_pct = 100;
                limits.min_perf_pct = 100;
                limits.min_perf = int_tofp(1);
                limits.max_policy_pct = 100;
@@ -897,8 +937,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                return 0;
        }
 
-       limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-       limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
+       limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+       limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
+       limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
        limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
        limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
@@ -978,6 +1019,7 @@ static struct cpufreq_driver intel_pstate_driver = {
 
 static int __initdata no_load;
 static int __initdata no_hwp;
+static int __initdata hwp_only;
 static unsigned int force_load;
 
 static int intel_pstate_msrs_not_valid(void)
@@ -1175,6 +1217,9 @@ static int __init intel_pstate_init(void)
        if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
                intel_pstate_hwp_enable();
 
+       if (!hwp_active && hwp_only)
+               goto out;
+
        rc = cpufreq_register_driver(&intel_pstate_driver);
        if (rc)
                goto out;
@@ -1209,6 +1254,8 @@ static int __init intel_pstate_setup(char *str)
                no_hwp = 1;
        if (!strcmp(str, "force"))
                force_load = 1;
+       if (!strcmp(str, "hwp_only"))
+               hwp_only = 1;
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
index 25fbd6a..f0913ee 100644 (file)
@@ -210,7 +210,6 @@ out:
 static struct platform_driver ls1x_cpufreq_platdrv = {
        .driver = {
                .name   = "ls1x-cpufreq",
-               .owner  = THIS_MODULE,
        },
        .probe          = ls1x_cpufreq_probe,
        .remove         = ls1x_cpufreq_remove,
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
new file mode 100644 (file)
index 0000000..ffa3389
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ *  SFI Performance States Driver
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  Author: Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>
+ *  Author: Srinidhi Kasagar <srinidhi.kasagar@intel.com>
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sfi.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/msr.h>
+
+struct cpufreq_frequency_table *freq_table;
+static struct sfi_freq_table_entry *sfi_cpufreq_array;
+static int num_freq_table_entries;
+
+static int sfi_parse_freq(struct sfi_table_header *table)
+{
+       struct sfi_table_simple *sb;
+       struct sfi_freq_table_entry *pentry;
+       int totallen;
+
+       sb = (struct sfi_table_simple *)table;
+       num_freq_table_entries = SFI_GET_NUM_ENTRIES(sb,
+                       struct sfi_freq_table_entry);
+       if (num_freq_table_entries <= 1) {
+               pr_err("No p-states discovered\n");
+               return -ENODEV;
+       }
+
+       pentry = (struct sfi_freq_table_entry *)sb->pentry;
+       totallen = num_freq_table_entries * sizeof(*pentry);
+
+       sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+       if (!sfi_cpufreq_array)
+               return -ENOMEM;
+
+       memcpy(sfi_cpufreq_array, pentry, totallen);
+
+       return 0;
+}
+
+static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index)
+{
+       unsigned int next_perf_state = 0; /* Index into perf table */
+       u32 lo, hi;
+
+       next_perf_state = policy->freq_table[index].driver_data;
+
+       rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
+       lo = (lo & ~INTEL_PERF_CTL_MASK) |
+               ((u32) sfi_cpufreq_array[next_perf_state].ctrl_val &
+               INTEL_PERF_CTL_MASK);
+       wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);
+
+       return 0;
+}
+
+static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+       policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+       policy->cpuinfo.transition_latency = 100000;    /* 100us */
+
+       return cpufreq_table_validate_and_show(policy, freq_table);
+}
+
+static struct cpufreq_driver sfi_cpufreq_driver = {
+       .flags          = CPUFREQ_CONST_LOOPS,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = sfi_cpufreq_target,
+       .init           = sfi_cpufreq_cpu_init,
+       .name           = "sfi-cpufreq",
+       .attr           = cpufreq_generic_attr,
+};
+
+static int __init sfi_cpufreq_init(void)
+{
+       int ret, i;
+
+       /* parse the freq table from SFI */
+       ret = sfi_table_parse(SFI_SIG_FREQ, NULL, NULL, sfi_parse_freq);
+       if (ret)
+               return ret;
+
+       freq_table = kzalloc(sizeof(*freq_table) *
+                       (num_freq_table_entries + 1), GFP_KERNEL);
+       if (!freq_table) {
+               ret = -ENOMEM;
+               goto err_free_array;
+       }
+
+       for (i = 0; i < num_freq_table_entries; i++) {
+               freq_table[i].driver_data = i;
+               freq_table[i].frequency = sfi_cpufreq_array[i].freq_mhz * 1000;
+       }
+       freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+       ret = cpufreq_register_driver(&sfi_cpufreq_driver);
+       if (ret)
+               goto err_free_tbl;
+
+       return ret;
+
+err_free_tbl:
+       kfree(freq_table);
+err_free_array:
+       kfree(sfi_cpufreq_array);
+       return ret;
+}
+late_initcall(sfi_cpufreq_init);
+
+static void __exit sfi_cpufreq_exit(void)
+{
+       cpufreq_unregister_driver(&sfi_cpufreq_driver);
+       kfree(freq_table);
+       kfree(sfi_cpufreq_array);
+}
+module_exit(sfi_cpufreq_exit);
+
+MODULE_AUTHOR("Vishwesh M Rudramuni <vishwesh.m.rudramuni@intel.com>");
+MODULE_DESCRIPTION("SFI Performance-States Driver");
+MODULE_LICENSE("GPL");
index e3e225f..40c34fa 100644 (file)
@@ -182,6 +182,10 @@ static int __init bl_idle_init(void)
         */
        if (!of_match_node(compatible_machine_match, root))
                return -ENODEV;
+
+       if (!mcpm_is_available())
+               return -EUNATCH;
+
        /*
         * For now the differentiation between little and big cores
         * is based on the part number. A7 cores are considered little
index 3891f67..64281bb 100644 (file)
@@ -88,4 +88,16 @@ config ARM_EXYNOS5_BUS_DEVFREQ
          It reads PPMU counters of memory controllers and adjusts the
          operating frequencies and voltages with OPP support.
 
+config ARM_TEGRA_DEVFREQ
+       tristate "Tegra DEVFREQ Driver"
+       depends on ARCH_TEGRA_124_SOC
+       select DEVFREQ_GOV_SIMPLE_ONDEMAND
+       select PM_OPP
+       help
+         This adds the DEVFREQ driver for the Tegra family of SoCs.
+         It reads ACTMON counters of memory controllers and adjusts the
+         operating frequencies and voltages with OPP support.
+
+source "drivers/devfreq/event/Kconfig"
+
 endif # PM_DEVFREQ
index 16138c9..5134f9e 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PM_DEVFREQ)       += devfreq.o
+obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
 obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)      += governor_simpleondemand.o
 obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)  += governor_performance.o
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)    += governor_powersave.o
@@ -7,3 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)     += governor_userspace.o
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ)  += exynos/
 obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ)  += exynos/
+obj-$(CONFIG_ARM_TEGRA_DEVFREQ)                += tegra-devfreq.o
+
+# DEVFREQ Event Drivers
+obj-$(CONFIG_PM_DEVFREQ_EVENT)         += event/
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
new file mode 100644 (file)
index 0000000..f304a02
--- /dev/null
@@ -0,0 +1,494 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/devfreq.c.
+ */
+
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/of.h>
+
+static struct class *devfreq_event_class;
+
+/* The list of all devfreq event list */
+static LIST_HEAD(devfreq_event_list);
+static DEFINE_MUTEX(devfreq_event_list_lock);
+
+#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
+
+/**
+ * devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
+ *                              the enable_count of devfreq-event dev.
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function increase the enable_count and enable the
+ * devfreq-event device. The devfreq-event device should be enabled before
+ * using it by devfreq device.
+ */
+int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+       int ret = 0;
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       mutex_lock(&edev->lock);
+       if (edev->desc->ops && edev->desc->ops->enable
+                       && edev->enable_count == 0) {
+               ret = edev->desc->ops->enable(edev);
+               if (ret < 0)
+                       goto err;
+       }
+       edev->enable_count++;
+err:
+       mutex_unlock(&edev->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
+
+/**
+ * devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
+ *                               the enable_count of the devfreq-event dev.
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function decrease the enable_count and disable the
+ * devfreq-event device. After the devfreq-event device is disabled,
+ * devfreq device can't use the devfreq-event device for get/set/reset
+ * operations.
+ */
+int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+       int ret = 0;
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       mutex_lock(&edev->lock);
+       if (edev->enable_count <= 0) {
+               dev_warn(&edev->dev, "unbalanced enable_count\n");
+               ret = -EIO;
+               goto err;
+       }
+
+       if (edev->desc->ops && edev->desc->ops->disable
+                       && edev->enable_count == 1) {
+               ret = edev->desc->ops->disable(edev);
+               if (ret < 0)
+                       goto err;
+       }
+       edev->enable_count--;
+err:
+       mutex_unlock(&edev->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
+
+/**
+ * devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
+ *                             not.
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function check whether devfreq-event dev is enabled or not.
+ * If return true, the devfreq-event dev is enabeld. If return false, the
+ * devfreq-event dev is disabled.
+ */
+bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+       bool enabled = false;
+
+       if (!edev || !edev->desc)
+               return enabled;
+
+       mutex_lock(&edev->lock);
+
+       if (edev->enable_count > 0)
+               enabled = true;
+
+       mutex_unlock(&edev->lock);
+
+       return enabled;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
+
+/**
+ * devfreq_event_set_event() - Set event to devfreq-event dev to start.
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function set the event to the devfreq-event device to start
+ * for getting the event data which could be various event type.
+ */
+int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+       int ret;
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       if (!edev->desc->ops || !edev->desc->ops->set_event)
+               return -EINVAL;
+
+       if (!devfreq_event_is_enabled(edev))
+               return -EPERM;
+
+       mutex_lock(&edev->lock);
+       ret = edev->desc->ops->set_event(edev);
+       mutex_unlock(&edev->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_set_event);
+
+/**
+ * devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
+ * @edev       : the devfreq-event device
+ * @edata      : the calculated data of devfreq-event device
+ *
+ * Note that this function get the calculated event data from devfreq-event dev
+ * after stoping the progress of whole sequence of devfreq-event dev.
+ */
+int devfreq_event_get_event(struct devfreq_event_dev *edev,
+                           struct devfreq_event_data *edata)
+{
+       int ret;
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       if (!edev->desc->ops || !edev->desc->ops->get_event)
+               return -EINVAL;
+
+       if (!devfreq_event_is_enabled(edev))
+               return -EINVAL;
+
+       edata->total_count = edata->load_count = 0;
+
+       mutex_lock(&edev->lock);
+       ret = edev->desc->ops->get_event(edev, edata);
+       if (ret < 0)
+               edata->total_count = edata->load_count = 0;
+       mutex_unlock(&edev->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_event);
+
+/**
+ * devfreq_event_reset_event() - Reset all opeations of devfreq-event dev.
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function stop all operations of devfreq-event dev and reset
+ * the current event data to make the devfreq-event device into initial state.
+ */
+int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+       int ret = 0;
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       if (!devfreq_event_is_enabled(edev))
+               return -EPERM;
+
+       mutex_lock(&edev->lock);
+       if (edev->desc->ops && edev->desc->ops->reset)
+               ret = edev->desc->ops->reset(edev);
+       mutex_unlock(&edev->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
+
+/**
+ * devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
+ *                                      devicetree.
+ * @dev                : the pointer to the given device
+ * @index      : the index into list of devfreq-event device
+ *
+ * Note that this function return the pointer of devfreq-event device.
+ */
+struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
+                                                     int index)
+{
+       struct device_node *node;
+       struct devfreq_event_dev *edev;
+
+       if (!dev->of_node) {
+               dev_err(dev, "device does not have a device node entry\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       node = of_parse_phandle(dev->of_node, "devfreq-events", index);
+       if (!node) {
+               dev_err(dev, "failed to get phandle in %s node\n",
+                       dev->of_node->full_name);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_lock(&devfreq_event_list_lock);
+       list_for_each_entry(edev, &devfreq_event_list, node) {
+               if (!strcmp(edev->desc->name, node->name))
+                       goto out;
+       }
+       edev = NULL;
+out:
+       mutex_unlock(&devfreq_event_list_lock);
+
+       if (!edev) {
+               dev_err(dev, "unable to get devfreq-event device : %s\n",
+                       node->name);
+               of_node_put(node);
+               return ERR_PTR(-ENODEV);
+       }
+
+       of_node_put(node);
+
+       return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
+
+/**
+ * devfreq_event_get_edev_count() - Get the count of devfreq-event dev
+ * @dev                : the pointer to the given device
+ *
+ * Note that this function return the count of devfreq-event devices.
+ */
+int devfreq_event_get_edev_count(struct device *dev)
+{
+       int count;
+
+       if (!dev->of_node) {
+               dev_err(dev, "device does not have a device node entry\n");
+               return -EINVAL;
+       }
+
+       count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
+                                               sizeof(u32));
+       if (count < 0 ) {
+               dev_err(dev,
+                       "failed to get the count of devfreq-event in %s node\n",
+                       dev->of_node->full_name);
+               return count;
+       }
+
+       return count;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
+
+static void devfreq_event_release_edev(struct device *dev)
+{
+       struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+       kfree(edev);
+}
+
+/**
+ * devfreq_event_add_edev() - Add new devfreq-event device.
+ * @dev                : the device owning the devfreq-event device being created
+ * @desc       : the devfreq-event device's decriptor which include essential
+ *               data for devfreq-event device.
+ *
+ * Note that this function add new devfreq-event device to devfreq-event class
+ * list and register the device of the devfreq-event device.
+ */
+struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+                                               struct devfreq_event_desc *desc)
+{
+       struct devfreq_event_dev *edev;
+       static atomic_t event_no = ATOMIC_INIT(0);
+       int ret;
+
+       if (!dev || !desc)
+               return ERR_PTR(-EINVAL);
+
+       if (!desc->name || !desc->ops)
+               return ERR_PTR(-EINVAL);
+
+       if (!desc->ops->set_event || !desc->ops->get_event)
+               return ERR_PTR(-EINVAL);
+
+       edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
+       if (!edev)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_init(&edev->lock);
+       edev->desc = desc;
+       edev->enable_count = 0;
+       edev->dev.parent = dev;
+       edev->dev.class = devfreq_event_class;
+       edev->dev.release = devfreq_event_release_edev;
+
+       dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
+       ret = device_register(&edev->dev);
+       if (ret < 0) {
+               put_device(&edev->dev);
+               return ERR_PTR(ret);
+       }
+       dev_set_drvdata(&edev->dev, edev);
+
+       INIT_LIST_HEAD(&edev->node);
+
+       mutex_lock(&devfreq_event_list_lock);
+       list_add(&edev->node, &devfreq_event_list);
+       mutex_unlock(&devfreq_event_list_lock);
+
+       return edev;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
+
+/**
+ * devfreq_event_remove_edev() - Remove the devfreq-event device registered.
+ * @dev                : the devfreq-event device
+ *
+ * Note that this function remove the registered devfreq-event device.
+ */
+int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+       if (!edev)
+               return -EINVAL;
+
+       WARN_ON(edev->enable_count);
+
+       mutex_lock(&devfreq_event_list_lock);
+       list_del(&edev->node);
+       mutex_unlock(&devfreq_event_list_lock);
+
+       device_unregister(&edev->dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
+
+static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
+{
+       struct devfreq_event_dev **r = res;
+
+       if (WARN_ON(!r || !*r))
+               return 0;
+
+       return *r == data;
+}
+
+static void devm_devfreq_event_release(struct device *dev, void *res)
+{
+       devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
+}
+
+/**
+ * devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
+ * @dev                : the device owning the devfreq-event device being created
+ * @desc       : the devfreq-event device's decriptor which include essential
+ *               data for devfreq-event device.
+ *
+ * Note that this function manages automatically the memory of devfreq-event
+ * device using device resource management and simplify the free operation
+ * for memory of devfreq-event device.
+ */
+struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+                                               struct devfreq_event_desc *desc)
+{
+       struct devfreq_event_dev **ptr, *edev;
+
+       ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       edev = devfreq_event_add_edev(dev, desc);
+       if (IS_ERR(edev)) {
+               devres_free(ptr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       *ptr = edev;
+       devres_add(dev, ptr);
+
+       return edev;
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
+
+/**
+ * devm_devfreq_event_remove_edev()- Resource-managed devfreq_event_remove_edev()
+ * @dev                : the device owning the devfreq-event device being created
+ * @edev       : the devfreq-event device
+ *
+ * Note that this function manages automatically the memory of devfreq-event
+ * device using device resource management.
+ */
+void devm_devfreq_event_remove_edev(struct device *dev,
+                               struct devfreq_event_dev *edev)
+{
+       WARN_ON(devres_release(dev, devm_devfreq_event_release,
+                              devm_devfreq_event_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
+
+/*
+ * Device attributes for devfreq-event class.
+ */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       return sprintf(buf, "%s\n", edev->desc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t enable_count_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct devfreq_event_dev *edev = to_devfreq_event(dev);
+
+       if (!edev || !edev->desc)
+               return -EINVAL;
+
+       return sprintf(buf, "%d\n", edev->enable_count);
+}
+static DEVICE_ATTR_RO(enable_count);
+
+static struct attribute *devfreq_event_attrs[] = {
+       &dev_attr_name.attr,
+       &dev_attr_enable_count.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(devfreq_event);
+
+static int __init devfreq_event_init(void)
+{
+       devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
+       if (IS_ERR(devfreq_event_class)) {
+               pr_err("%s: couldn't create class\n", __FILE__);
+               return PTR_ERR(devfreq_event_class);
+       }
+
+       devfreq_event_class->dev_groups = devfreq_event_groups;
+
+       return 0;
+}
+subsys_initcall(devfreq_event_init);
+
+static void __exit devfreq_event_exit(void)
+{
+       class_destroy(devfreq_event_class);
+}
+module_exit(devfreq_event_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ-Event class support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
new file mode 100644 (file)
index 0000000..a11720a
--- /dev/null
@@ -0,0 +1,25 @@
+menuconfig PM_DEVFREQ_EVENT
+       bool "DEVFREQ-Event device Support"
+       help
+         A devfreq-event device provides the raw data and events that
+         indicate the current state of the device. The data provided by
+         a devfreq-event device is used to monitor the device's state
+         and to determine the suitable amount of resources, reducing
+         resource waste.
+
+         Devfreq-event devices can support various types of events
+         (e.g., raw data, utilization, latency, bandwidth). These events
+         may be used by devfreq governors and other subsystems.
+
+if PM_DEVFREQ_EVENT
+
+config DEVFREQ_EVENT_EXYNOS_PPMU
+       bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
+       depends on ARCH_EXYNOS
+       select PM_OPP
+       help
+         This adds the devfreq-event driver for Exynos SoCs. It provides
+         PPMU (Platform Performance Monitoring Unit) counters to estimate
+         the utilization of each module.
+
+endif # PM_DEVFREQ_EVENT
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
new file mode 100644 (file)
index 0000000..be146ea
--- /dev/null
@@ -0,0 +1,2 @@
+# Exynos DEVFREQ Event Drivers
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
new file mode 100644 (file)
index 0000000..135be0a
--- /dev/null
@@ -0,0 +1,374 @@
+/*
+ * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include <linux/devfreq-event.h>
+
+#include "exynos-ppmu.h"
+
/*
 * struct exynos_ppmu_data - per-instance PPMU hardware resources
 * @base:	memory-mapped PPMU register base (mapped via of_iomap())
 * @clk:	optional "ppmu" clock; set to NULL when absent from DT
 */
struct exynos_ppmu_data {
	void __iomem *base;
	struct clk *clk;
};

/*
 * struct exynos_ppmu - driver state for one PPMU device-tree node
 * @edev:	registered devfreq-event devices, @num_events entries
 * @desc:	event descriptors parsed from the DT "events" children
 * @num_events:	number of child nodes under "events"
 * @dev:	the owning platform device's struct device
 * @lock:	initialized in probe; NOTE(review): no user visible in this
 *		file — confirm intended scope
 * @ppmu:	register base and clock for this PPMU instance
 */
struct exynos_ppmu {
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	unsigned int num_events;

	struct device *dev;
	struct mutex lock;

	struct exynos_ppmu_data ppmu;
};
+
+#define PPMU_EVENT(name)                       \
+       { "ppmu-event0-"#name, PPMU_PMNCNT0 },  \
+       { "ppmu-event1-"#name, PPMU_PMNCNT1 },  \
+       { "ppmu-event2-"#name, PPMU_PMNCNT2 },  \
+       { "ppmu-event3-"#name, PPMU_PMNCNT3 }
+
+struct __exynos_ppmu_events {
+       char *name;
+       int id;
+} ppmu_events[] = {
+       /* For Exynos3250, Exynos4 and Exynos5260 */
+       PPMU_EVENT(g3d),
+       PPMU_EVENT(fsys),
+
+       /* For Exynos4 SoCs and Exynos3250 */
+       PPMU_EVENT(dmc0),
+       PPMU_EVENT(dmc1),
+       PPMU_EVENT(cpu),
+       PPMU_EVENT(rightbus),
+       PPMU_EVENT(leftbus),
+       PPMU_EVENT(lcd0),
+       PPMU_EVENT(camif),
+
+       /* Only for Exynos3250 and Exynos5260 */
+       PPMU_EVENT(mfc),
+
+       /* Only for Exynos4 SoCs */
+       PPMU_EVENT(mfc-left),
+       PPMU_EVENT(mfc-right),
+
+       /* Only for Exynos5260 SoCs */
+       PPMU_EVENT(drex0-s0),
+       PPMU_EVENT(drex0-s1),
+       PPMU_EVENT(drex1-s0),
+       PPMU_EVENT(drex1-s1),
+       PPMU_EVENT(eagle),
+       PPMU_EVENT(kfc),
+       PPMU_EVENT(isp),
+       PPMU_EVENT(fimc),
+       PPMU_EVENT(gscl),
+       PPMU_EVENT(mscl),
+       PPMU_EVENT(fimd0x),
+       PPMU_EVENT(fimd1x),
+       { /* sentinel */ },
+};
+
+static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
+               if (!strcmp(edev->desc->name, ppmu_events[i].name))
+                       return ppmu_events[i].id;
+
+       return -EINVAL;
+}
+
/*
 * Stop all PPMU counting for this event device: clear every counter
 * enable bit (cycle counter plus the four performance counters) via the
 * counter-enable-clear register, then clear the global enable bit in
 * PMNC. Always returns 0.
 */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	u32 pmnc;

	/* Disable all counters */
	__raw_writel(PPMU_CCNT_MASK |
		     PPMU_PMCNT0_MASK |
		     PPMU_PMCNT1_MASK |
		     PPMU_PMCNT2_MASK |
		     PPMU_PMCNT3_MASK,
		     info->ppmu.base + PPMU_CNTENC);

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}
+
/*
 * Arm the PPMU counter that corresponds to this event device: enable
 * the cycle counter and the per-event counter, select read+write data
 * counting on it, then reset all counters and start the PPMU.
 *
 * Returns 0 on success or a negative errno when the event name does
 * not map to a counter id.
 */
static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);

	/* Set the event of Read/Write data count  */
	__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
			info->ppmu.base + PPMU_BEVTxSEL(id));

	/* Reset cycle counter/performance counter and enable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	return 0;
}
+
/*
 * Read back the counters for this event device: stop the PPMU, fetch
 * the cycle count into @edata->total_count and the event count into
 * @edata->load_count, then disable the counters that were armed.
 *
 * Counter 3 is wider than the others and is read as a high/low register
 * pair (high byte shifted left by 8).
 *
 * Returns 0 on success or -EINVAL for an unknown event/counter id.
 */
static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	u32 pmnc, cntenc;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);

	/* Read cycle count */
	edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		edata->load_count
			= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
		break;
	case PPMU_PMNCNT3:
		/* Counter 3 is split across a high/low register pair. */
		edata->load_count =
			((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
			| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);

	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
					edata->load_count, edata->total_count);

	return 0;
}
+
+static struct devfreq_event_ops exynos_ppmu_ops = {
+       .disable = exynos_ppmu_disable,
+       .set_event = exynos_ppmu_set_event,
+       .get_event = exynos_ppmu_get_event,
+};
+
+static int of_get_devfreq_events(struct device_node *np,
+                                struct exynos_ppmu *info)
+{
+       struct devfreq_event_desc *desc;
+       struct device *dev = info->dev;
+       struct device_node *events_np, *node;
+       int i, j, count;
+
+       events_np = of_get_child_by_name(np, "events");
+       if (!events_np) {
+               dev_err(dev,
+                       "failed to get child node of devfreq-event devices\n");
+               return -EINVAL;
+       }
+
+       count = of_get_child_count(events_np);
+       desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+       info->num_events = count;
+
+       j = 0;
+       for_each_child_of_node(events_np, node) {
+               for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
+                       if (!ppmu_events[i].name)
+                               continue;
+
+                       if (!of_node_cmp(node->name, ppmu_events[i].name))
+                               break;
+               }
+
+               if (i == ARRAY_SIZE(ppmu_events)) {
+                       dev_warn(dev,
+                               "don't know how to configure events : %s\n",
+                               node->name);
+                       continue;
+               }
+
+               desc[j].ops = &exynos_ppmu_ops;
+               desc[j].driver_data = info;
+
+               of_property_read_string(node, "event-name", &desc[j].name);
+
+               j++;
+
+               of_node_put(node);
+       }
+       info->desc = desc;
+
+       of_node_put(events_np);
+
+       return 0;
+}
+
+static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
+{
+       struct device *dev = info->dev;
+       struct device_node *np = dev->of_node;
+       int ret = 0;
+
+       if (!np) {
+               dev_err(dev, "failed to find devicetree node\n");
+               return -EINVAL;
+       }
+
+       /* Maps the memory mapped IO to control PPMU register */
+       info->ppmu.base = of_iomap(np, 0);
+       if (IS_ERR_OR_NULL(info->ppmu.base)) {
+               dev_err(dev, "failed to map memory region\n");
+               return -ENOMEM;
+       }
+
+       info->ppmu.clk = devm_clk_get(dev, "ppmu");
+       if (IS_ERR(info->ppmu.clk)) {
+               info->ppmu.clk = NULL;
+               dev_warn(dev, "cannot get PPMU clock\n");
+       }
+
+       ret = of_get_devfreq_events(np, info);
+       if (ret < 0) {
+               dev_err(dev, "failed to parse exynos ppmu dt node\n");
+               goto err;
+       }
+
+       return 0;
+
+err:
+       iounmap(info->ppmu.base);
+
+       return ret;
+}
+
+static int exynos_ppmu_probe(struct platform_device *pdev)
+{
+       struct exynos_ppmu *info;
+       struct devfreq_event_dev **edev;
+       struct devfreq_event_desc *desc;
+       int i, ret = 0, size;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       mutex_init(&info->lock);
+       info->dev = &pdev->dev;
+
+       /* Parse dt data to get resource */
+       ret = exynos_ppmu_parse_dt(info);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "failed to parse devicetree for resource\n");
+               return ret;
+       }
+       desc = info->desc;
+
+       size = sizeof(struct devfreq_event_dev *) * info->num_events;
+       info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+       if (!info->edev) {
+               dev_err(&pdev->dev,
+                       "failed to allocate memory devfreq-event devices\n");
+               return -ENOMEM;
+       }
+       edev = info->edev;
+       platform_set_drvdata(pdev, info);
+
+       for (i = 0; i < info->num_events; i++) {
+               edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
+               if (IS_ERR(edev)) {
+                       ret = PTR_ERR(edev);
+                       dev_err(&pdev->dev,
+                               "failed to add devfreq-event device\n");
+                       goto err;
+               }
+       }
+
+       clk_prepare_enable(info->ppmu.clk);
+
+       return 0;
+err:
+       iounmap(info->ppmu.base);
+
+       return ret;
+}
+
/*
 * Undo probe: stop the PPMU clock and unmap the register window.
 * The devfreq-event devices themselves are devres-managed and released
 * automatically. Always returns 0.
 */
static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);
	iounmap(info->ppmu.base);

	return 0;
}
+
+static struct of_device_id exynos_ppmu_id_match[] = {
+       { .compatible = "samsung,exynos-ppmu", },
+       { /* sentinel */ },
+};
+
/* Platform driver glue; bound from DT via exynos_ppmu_id_match. */
static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);
+
+MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
new file mode 100644 (file)
index 0000000..4e831d4
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * exynos_ppmu.h - EXYNOS PPMU header file
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PPMU_H__
+#define __EXYNOS_PPMU_H__
+
+enum ppmu_state {
+       PPMU_DISABLE = 0,
+       PPMU_ENABLE,
+};
+
+enum ppmu_counter {
+       PPMU_PMNCNT0 = 0,
+       PPMU_PMNCNT1,
+       PPMU_PMNCNT2,
+       PPMU_PMNCNT3,
+
+       PPMU_PMNCNT_MAX,
+};
+
+enum ppmu_event_type {
+       PPMU_RO_BUSY_CYCLE_CNT  = 0x0,
+       PPMU_WO_BUSY_CYCLE_CNT  = 0x1,
+       PPMU_RW_BUSY_CYCLE_CNT  = 0x2,
+       PPMU_RO_REQUEST_CNT     = 0x3,
+       PPMU_WO_REQUEST_CNT     = 0x4,
+       PPMU_RO_DATA_CNT        = 0x5,
+       PPMU_WO_DATA_CNT        = 0x6,
+       PPMU_RO_LATENCY         = 0x12,
+       PPMU_WO_LATENCY         = 0x16,
+};
+
+enum ppmu_reg {
+       /* PPC control register */
+       PPMU_PMNC               = 0x00,
+       PPMU_CNTENS             = 0x10,
+       PPMU_CNTENC             = 0x20,
+       PPMU_INTENS             = 0x30,
+       PPMU_INTENC             = 0x40,
+       PPMU_FLAG               = 0x50,
+
+       /* Cycle Counter and Performance Event Counter Register */
+       PPMU_CCNT               = 0x100,
+       PPMU_PMCNT0             = 0x110,
+       PPMU_PMCNT1             = 0x120,
+       PPMU_PMCNT2             = 0x130,
+       PPMU_PMCNT3_HIGH        = 0x140,
+       PPMU_PMCNT3_LOW         = 0x150,
+
+       /* Bus Event Generator */
+       PPMU_BEVT0SEL           = 0x1000,
+       PPMU_BEVT1SEL           = 0x1100,
+       PPMU_BEVT2SEL           = 0x1200,
+       PPMU_BEVT3SEL           = 0x1300,
+       PPMU_COUNTER_RESET      = 0x1810,
+       PPMU_READ_OVERFLOW_CNT  = 0x1810,
+       PPMU_READ_UNDERFLOW_CNT = 0x1814,
+       PPMU_WRITE_OVERFLOW_CNT = 0x1850,
+       PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
+       PPMU_READ_PENDING_CNT   = 0x1880,
+       PPMU_WRITE_PENDING_CNT  = 0x1884
+};
+
+/* PMNC register */
+#define PPMU_PMNC_CC_RESET_SHIFT       2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT  1
+#define PPMU_PMNC_ENABLE_SHIFT         0
+#define PPMU_PMNC_START_MODE_MASK      BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK      BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK                BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK   BIT(1)
+#define PPMU_PMNC_ENABLE_MASK          BIT(0)
+
+/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
+#define PPMU_CCNT_MASK                 BIT(31)
+#define PPMU_PMCNT3_MASK               BIT(3)
+#define PPMU_PMCNT2_MASK               BIT(2)
+#define PPMU_PMCNT1_MASK               BIT(1)
+#define PPMU_PMCNT0_MASK               BIT(0)
+
+/* PPMU_PMNCTx/PPMU_BETxSEL registers */
+#define PPMU_PMNCT(x)                  (PPMU_PMCNT0 + (0x10 * x))
+#define PPMU_BEVTxSEL(x)               (PPMU_BEVT0SEL + (0x100 * x))
+
+#endif /* __EXYNOS_PPMU_H__ */
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
new file mode 100644 (file)
index 0000000..3479096
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ * A devfreq driver for NVIDIA Tegra SoCs
+ *
+ * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2014 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "governor.h"
+
+#define ACTMON_GLB_STATUS                                      0x0
+#define ACTMON_GLB_PERIOD_CTRL                                 0x4
+
+#define ACTMON_DEV_CTRL                                                0x0
+#define ACTMON_DEV_CTRL_K_VAL_SHIFT                            10
+#define ACTMON_DEV_CTRL_ENB_PERIODIC                           BIT(18)
+#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN                     BIT(20)
+#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN                     BIT(21)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT      23
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT      26
+#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN             BIT(29)
+#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN             BIT(30)
+#define ACTMON_DEV_CTRL_ENB                                    BIT(31)
+
+#define ACTMON_DEV_UPPER_WMARK                                 0x4
+#define ACTMON_DEV_LOWER_WMARK                                 0x8
+#define ACTMON_DEV_INIT_AVG                                    0xc
+#define ACTMON_DEV_AVG_UPPER_WMARK                             0x10
+#define ACTMON_DEV_AVG_LOWER_WMARK                             0x14
+#define ACTMON_DEV_COUNT_WEIGHT                                        0x18
+#define ACTMON_DEV_AVG_COUNT                                   0x20
+#define ACTMON_DEV_INTR_STATUS                                 0x24
+
+#define ACTMON_INTR_STATUS_CLEAR                               0xffffffff
+
+#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER                      BIT(31)
+#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER                      BIT(30)
+
+#define ACTMON_ABOVE_WMARK_WINDOW                              1
+#define ACTMON_BELOW_WMARK_WINDOW                              3
+#define ACTMON_BOOST_FREQ_STEP                                 16000
+
+/* activity counter is incremented every 256 memory transactions, and each
+ * transaction takes 4 EMC clocks for Tegra124; So the COUNT_WEIGHT is
+ * 4 * 256 = 1024.
+ */
+#define ACTMON_COUNT_WEIGHT                                    0x400
+
+/*
+ * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
+ * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
+ */
+#define ACTMON_AVERAGE_WINDOW_LOG2                     6
+#define ACTMON_SAMPLING_PERIOD                         12 /* ms */
+#define ACTMON_DEFAULT_AVG_BAND                                6  /* 1/10 of % */
+
+#define KHZ                                                    1000
+
+/* Assume that the bus is saturated if the utilization is 25% */
+#define BUS_SATURATION_RATIO                                   25
+
+/**
+ * struct tegra_devfreq_device_config - configuration specific to an ACTMON
+ * device
+ *
+ * Coefficients and thresholds are in %
+ */
+struct tegra_devfreq_device_config {
+       u32             offset;
+       u32             irq_mask;
+
+       unsigned int    boost_up_coeff;
+       unsigned int    boost_down_coeff;
+       unsigned int    boost_up_threshold;
+       unsigned int    boost_down_threshold;
+       u32             avg_dependency_threshold;
+};
+
+enum tegra_actmon_device {
+       MCALL = 0,
+       MCCPU,
+};
+
+static struct tegra_devfreq_device_config actmon_device_configs[] = {
+       {
+               /* MCALL */
+               .offset = 0x1c0,
+               .irq_mask = 1 << 26,
+               .boost_up_coeff = 200,
+               .boost_down_coeff = 50,
+               .boost_up_threshold = 60,
+               .boost_down_threshold = 40,
+       },
+       {
+               /* MCCPU */
+               .offset = 0x200,
+               .irq_mask = 1 << 25,
+               .boost_up_coeff = 800,
+               .boost_down_coeff = 90,
+               .boost_up_threshold = 27,
+               .boost_down_threshold = 10,
+               .avg_dependency_threshold = 50000,
+       },
+};
+
+/**
+ * struct tegra_devfreq_device - state specific to an ACTMON device
+ *
+ * Frequencies are in kHz.
+ */
+struct tegra_devfreq_device {
+       const struct tegra_devfreq_device_config *config;
+
+       void __iomem    *regs;
+       u32             avg_band_freq;
+       u32             avg_count;
+
+       unsigned long   target_freq;
+       unsigned long   boost_freq;
+};
+
+struct tegra_devfreq {
+       struct devfreq          *devfreq;
+
+       struct platform_device  *pdev;
+       struct reset_control    *reset;
+       struct clk              *clock;
+       void __iomem            *regs;
+
+       spinlock_t              lock;
+
+       struct clk              *emc_clock;
+       unsigned long           max_freq;
+       unsigned long           cur_freq;
+       struct notifier_block   rate_change_nb;
+
+       struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
+};
+
+struct tegra_actmon_emc_ratio {
+       unsigned long cpu_freq;
+       unsigned long emc_freq;
+};
+
+static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+       { 1400000, ULONG_MAX },
+       { 1200000,    750000 },
+       { 1100000,    600000 },
+       { 1000000,    500000 },
+       {  800000,    375000 },
+       {  500000,    200000 },
+       {  250000,    100000 },
+};
+
/* Return @pct percent of @val using truncating integer arithmetic. */
static unsigned long do_percent(unsigned long val, unsigned int pct)
{
	unsigned long scaled = val * pct;

	return scaled / 100;
}
+
/*
 * Re-center the average-activity watermark window around the last
 * sampled average count, +/- one band. The lower watermark is clamped
 * at zero via max() to avoid unsigned underflow when avg < band.
 */
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
{
	u32 avg = dev->avg_count;
	u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;

	writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
	avg = max(avg, band);
	writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
}
+
/*
 * Program the raw upper/lower activity watermarks as percentages
 * (boost_up/down thresholds) of the count expected at the current EMC
 * frequency over one sampling period.
 */
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
				       struct tegra_devfreq_device *dev)
{
	u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;

	writel(do_percent(val, dev->config->boost_up_threshold),
	       dev->regs + ACTMON_DEV_UPPER_WMARK);

	writel(do_percent(val, dev->config->boost_down_threshold),
	       dev->regs + ACTMON_DEV_LOWER_WMARK);
}
+
/*
 * Flush posted register writes to the ACTMON: issue a write memory
 * barrier, then read back a device register so the writes are known to
 * have reached the hardware before we proceed.
 */
static void actmon_write_barrier(struct tegra_devfreq *tegra)
{
	/* ensure the update has reached the ACTMON */
	wmb();
	readl(tegra->regs + ACTMON_GLB_STATUS);
}
+
/*
 * Hard-IRQ handler for the ACTMON block.
 *
 * Finds which monitored device raised the interrupt via the global
 * status register, refreshes its average count and watermarks, and
 * adjusts its software boost frequency:
 *  - consecutive-upper events grow the boost (new = old * up_coeff +
 *    step), capped at max_freq;
 *  - consecutive-lower events shrink it (new = old * down_coeff),
 *    snapped to 0 below half a boost step.
 * The consecutive-watermark interrupt enables are toggled accordingly
 * so a saturated boost stops re-triggering.
 *
 * Returns IRQ_WAKE_THREAD to run actmon_thread_isr(), or IRQ_NONE when
 * no known device flagged the interrupt.
 */
static irqreturn_t actmon_isr(int irq, void *data)
{
	struct tegra_devfreq *tegra = data;
	struct tegra_devfreq_device *dev = NULL;
	unsigned long flags;
	u32 val;
	unsigned int i;

	val = readl(tegra->regs + ACTMON_GLB_STATUS);

	/* Match the pending bit to one of our monitored devices. */
	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		if (val & tegra->devices[i].config->irq_mask) {
			dev = tegra->devices + i;
			break;
		}
	}

	if (!dev)
		return IRQ_NONE;

	spin_lock_irqsave(&tegra->lock, flags);

	dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(dev);

	val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
	if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		val = readl(dev->regs + ACTMON_DEV_CTRL) |
			ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
			ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
		if (dev->boost_freq >= tegra->max_freq) {
			dev->boost_freq = tegra->max_freq;
			val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
		}
		writel(val, dev->regs + ACTMON_DEV_CTRL);
	} else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		val = readl(dev->regs + ACTMON_DEV_CTRL) |
			ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
			ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);
		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			dev->boost_freq = 0;
			val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
		}
		writel(val, dev->regs + ACTMON_DEV_CTRL);
	}

	/*
	 * Devices with an avg_dependency_threshold (MCCPU) gate the
	 * consecutive-lower interrupt on the average activity level.
	 */
	if (dev->config->avg_dependency_threshold) {
		val = readl(dev->regs + ACTMON_DEV_CTRL);
		if (dev->avg_count >= dev->config->avg_dependency_threshold)
			val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
		else if (dev->boost_freq == 0)
			val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
		writel(val, dev->regs + ACTMON_DEV_CTRL);
	}

	writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);

	actmon_write_barrier(tegra);

	spin_unlock_irqrestore(&tegra->lock, flags);

	return IRQ_WAKE_THREAD;
}
+
+static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
+                                           unsigned long cpu_freq)
+{
+       unsigned int i;
+       struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
+
+       for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
+               if (cpu_freq >= ratio->cpu_freq) {
+                       if (ratio->emc_freq >= tegra->max_freq)
+                               return tegra->max_freq;
+                       else
+                               return ratio->emc_freq;
+               }
+       }
+
+       return 0;
+}
+
/*
 * Recompute dev->target_freq from the latest average activity count:
 * scale the per-period average by the sustain coefficient (inverse of
 * the boost-up threshold), add the current boost, and — for devices
 * with an avg_dependency_threshold — enforce the CPU-derived static
 * EMC floor when activity is high enough.
 *
 * cpufreq_get() may sleep, so it is called before taking the spinlock.
 */
static void actmon_update_target(struct tegra_devfreq *tegra,
				 struct tegra_devfreq_device *dev)
{
	unsigned long cpu_freq = 0;
	unsigned long static_cpu_emc_freq = 0;
	unsigned int avg_sustain_coef;
	unsigned long flags;

	if (dev->config->avg_dependency_threshold) {
		cpu_freq = cpufreq_get(0);
		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
	}

	spin_lock_irqsave(&tegra->lock, flags);

	dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
	dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
	dev->target_freq += dev->boost_freq;

	if (dev->avg_count >= dev->config->avg_dependency_threshold)
		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);

	spin_unlock_irqrestore(&tegra->lock, flags);
}
+
/*
 * Threaded half of the ACTMON interrupt: re-evaluate the devfreq
 * target in a context where the devfreq mutex may be taken.
 */
static irqreturn_t actmon_thread_isr(int irq, void *data)
{
	struct tegra_devfreq *tegra = data;

	mutex_lock(&tegra->devfreq->lock);
	update_devfreq(tegra->devfreq);
	mutex_unlock(&tegra->devfreq->lock);

	return IRQ_HANDLED;
}
+
+static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
+                                      unsigned long action, void *ptr)
+{
+       struct clk_notifier_data *data = ptr;
+       struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
+                                                  rate_change_nb);
+       unsigned int i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tegra->lock, flags);
+
+       switch (action) {
+       case POST_RATE_CHANGE:
+               tegra->cur_freq = data->new_rate / KHZ;
+
+               for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
+                       tegra_devfreq_update_wmark(tegra, tegra->devices + i);
+
+               actmon_write_barrier(tegra);
+               break;
+       case PRE_RATE_CHANGE:
+               /* fall through */
+       case ABORT_RATE_CHANGE:
+               break;
+       };
+
+       spin_unlock_irqrestore(&tegra->lock, flags);
+
+       return NOTIFY_OK;
+}
+
/*
 * Fully (re)initialize one ACTMON device: seed the average count from
 * the current EMC frequency, program both watermark pairs, the count
 * weight and interrupt configuration, then enable the device. A write
 * barrier separates configuration from the final enable so the device
 * starts with consistent settings.
 */
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val;

	dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
	dev->target_freq = tegra->cur_freq;

	/* Seed the moving average with the count expected at cur_freq. */
	dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
	writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(dev);
	tegra_devfreq_update_wmark(tegra, dev);

	writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
	writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);

	val = 0;
	val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
	       ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
	       ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
	       ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

	writel(val, dev->regs + ACTMON_DEV_CTRL);

	actmon_write_barrier(tegra);

	/* Enable only after the configuration above has landed. */
	val = readl(dev->regs + ACTMON_DEV_CTRL);
	val |= ACTMON_DEV_CTRL_ENB;
	writel(val, dev->regs + ACTMON_DEV_CTRL);

	actmon_write_barrier(tegra);
}
+
/*
 * PM suspend hook: disable every ACTMON device and clear any pending
 * interrupt status so no stale interrupts fire on resume.
 * Always returns 0.
 */
static int tegra_devfreq_suspend(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *actmon_dev;
	unsigned int i;
	u32 val;

	pdev = container_of(dev, struct platform_device, dev);
	tegra = platform_get_drvdata(pdev);

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		actmon_dev = &tegra->devices[i];

		/* Clear the per-device enable bit. */
		val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
		val &= ~ACTMON_DEV_CTRL_ENB;
		writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);

		writel(ACTMON_INTR_STATUS_CLEAR,
		       actmon_dev->regs + ACTMON_DEV_INTR_STATUS);

		actmon_write_barrier(tegra);
	}

	return 0;
}
+
/*
 * PM resume hook: reprogram and re-enable every ACTMON device from
 * scratch (register contents are not assumed to survive suspend).
 * Always returns 0.
 */
static int tegra_devfreq_resume(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_devfreq *tegra;
	struct tegra_devfreq_device *actmon_dev;
	unsigned int i;

	pdev = container_of(dev, struct platform_device, dev);
	tegra = platform_get_drvdata(pdev);

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		actmon_dev = &tegra->devices[i];

		tegra_actmon_configure_device(tegra, actmon_dev);
	}

	return 0;
}
+
+static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
+                               u32 flags)
+{
+       struct platform_device *pdev;
+       struct tegra_devfreq *tegra;
+       struct dev_pm_opp *opp;
+       unsigned long rate = *freq * KHZ;
+
+       pdev = container_of(dev, struct platform_device, dev);
+       tegra = platform_get_drvdata(pdev);
+
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, &rate, flags);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
+               dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
+               return PTR_ERR(opp);
+       }
+       rate = dev_pm_opp_get_freq(opp);
+       rcu_read_unlock();
+
+       /* TODO: Once we have per-user clk constraints, set a floor */
+       clk_set_rate(tegra->emc_clock, rate);
+
+       /* TODO: Set voltage as well */
+
+       return 0;
+}
+
+static int tegra_devfreq_get_dev_status(struct device *dev,
+                                       struct devfreq_dev_status *stat)
+{
+       struct platform_device *pdev;
+       struct tegra_devfreq *tegra;
+       struct tegra_devfreq_device *actmon_dev;
+
+       pdev = container_of(dev, struct platform_device, dev);
+       tegra = platform_get_drvdata(pdev);
+
+       stat->current_frequency = tegra->cur_freq;
+
+       /* To be used by the tegra governor */
+       stat->private_data = tegra;
+
+       /* The below are to be used by the other governors */
+
+       actmon_dev = &tegra->devices[MCALL];
+
+       /* Number of cycles spent on memory access */
+       stat->busy_time = actmon_dev->avg_count;
+
+       /* The bus can be considered to be saturated way before 100% */
+       stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+
+       /* Number of cycles in a sampling period */
+       stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
+
+       return 0;
+}
+
+static int tegra_devfreq_get_target(struct devfreq *devfreq,
+                                   unsigned long *freq)
+{
+       struct devfreq_dev_status stat;
+       struct tegra_devfreq *tegra;
+       struct tegra_devfreq_device *dev;
+       unsigned long target_freq = 0;
+       unsigned int i;
+       int err;
+
+       err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
+       if (err)
+               return err;
+
+       tegra = stat.private_data;
+
+       for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
+               dev = &tegra->devices[i];
+
+               actmon_update_target(tegra, dev);
+
+               target_freq = max(target_freq, dev->target_freq);
+       }
+
+       *freq = target_freq;
+
+       return 0;
+}
+
+static int tegra_devfreq_event_handler(struct devfreq *devfreq,
+                                      unsigned int event, void *data)
+{
+       return 0;
+}
+
+static struct devfreq_governor tegra_devfreq_governor = {
+       .name = "tegra",
+       .get_target_freq = tegra_devfreq_get_target,
+       .event_handler = tegra_devfreq_event_handler,
+};
+
+static struct devfreq_dev_profile tegra_devfreq_profile = {
+       .polling_ms     = 0,
+       .target         = tegra_devfreq_target,
+       .get_dev_status = tegra_devfreq_get_dev_status,
+};
+
+static int tegra_devfreq_probe(struct platform_device *pdev)
+{
+       struct tegra_devfreq *tegra;
+       struct tegra_devfreq_device *dev;
+       struct resource *res;
+       unsigned long max_freq;
+       unsigned int i;
+       int irq;
+       int err;
+
+       tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+       if (!tegra)
+               return -ENOMEM;
+
+       spin_lock_init(&tegra->lock);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Failed to get regs resource\n");
+               return -ENODEV;
+       }
+
+       tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(tegra->regs)) {
+               dev_err(&pdev->dev, "Failed to get IO memory\n");
+               return PTR_ERR(tegra->regs);
+       }
+
+       tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
+       if (IS_ERR(tegra->reset)) {
+               dev_err(&pdev->dev, "Failed to get reset\n");
+               return PTR_ERR(tegra->reset);
+       }
+
+       tegra->clock = devm_clk_get(&pdev->dev, "actmon");
+       if (IS_ERR(tegra->clock)) {
+               dev_err(&pdev->dev, "Failed to get actmon clock\n");
+               return PTR_ERR(tegra->clock);
+       }
+
+       tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
+       if (IS_ERR(tegra->emc_clock)) {
+               dev_err(&pdev->dev, "Failed to get emc clock\n");
+               return PTR_ERR(tegra->emc_clock);
+       }
+
+       err = of_init_opp_table(&pdev->dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to init operating point table\n");
+               return err;
+       }
+
+       tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
+       err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "Failed to register rate change notifier\n");
+               return err;
+       }
+
+       reset_control_assert(tegra->reset);
+
+       err = clk_prepare_enable(tegra->clock);
+       if (err) {
+               reset_control_deassert(tegra->reset);
+               return err;
+       }
+
+       reset_control_deassert(tegra->reset);
+
+       max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
+       tegra->max_freq = max_freq / KHZ;
+
+       clk_set_rate(tegra->emc_clock, max_freq);
+
+       tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
+
+       writel(ACTMON_SAMPLING_PERIOD - 1,
+              tegra->regs + ACTMON_GLB_PERIOD_CTRL);
+
+       for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
+               dev = tegra->devices + i;
+               dev->config = actmon_device_configs + i;
+               dev->regs = tegra->regs + dev->config->offset;
+
+               tegra_actmon_configure_device(tegra, tegra->devices + i);
+       }
+
+       err = devfreq_add_governor(&tegra_devfreq_governor);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to add governor\n");
+               return err;
+       }
+
+       tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
+       tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
+                                                &tegra_devfreq_profile,
+                                                "tegra",
+                                                NULL);
+
+       irq = platform_get_irq(pdev, 0);
+       err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
+                                       actmon_thread_isr, IRQF_SHARED,
+                                       "tegra-devfreq", tegra);
+       if (err) {
+               dev_err(&pdev->dev, "Interrupt request failed\n");
+               return err;
+       }
+
+       platform_set_drvdata(pdev, tegra);
+
+       return 0;
+}
+
+static int tegra_devfreq_remove(struct platform_device *pdev)
+{
+       struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
+
+       clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
+
+       clk_disable_unprepare(tegra->clock);
+
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
+                        tegra_devfreq_suspend,
+                        tegra_devfreq_resume);
+
+static struct of_device_id tegra_devfreq_of_match[] = {
+       { .compatible = "nvidia,tegra124-actmon" },
+       { },
+};
+
+static struct platform_driver tegra_devfreq_driver = {
+       .probe  = tegra_devfreq_probe,
+       .remove = tegra_devfreq_remove,
+       .driver = {
+               .name           = "tegra-devfreq",
+               .owner          = THIS_MODULE,
+               .of_match_table = tegra_devfreq_of_match,
+               .pm             = &tegra_devfreq_pm_ops,
+       },
+};
+module_platform_driver(tegra_devfreq_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Tegra devfreq driver");
+MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
+MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
index de361a1..5a63564 100644 (file)
@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 {
        const struct acpi_csrt_shared_info *si;
        struct list_head resource_list;
-       struct resource_list_entry *rentry;
+       struct resource_entry *rentry;
        resource_size_t mem = 0, irq = 0;
        int ret;
 
@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
                return 0;
 
        list_for_each_entry(rentry, &resource_list, node) {
-               if (resource_type(&rentry->res) == IORESOURCE_MEM)
-                       mem = rentry->res.start;
-               else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
-                       irq = rentry->res.start;
+               if (resource_type(rentry->res) == IORESOURCE_MEM)
+                       mem = rentry->res->start;
+               else if (resource_type(rentry->res) == IORESOURCE_IRQ)
+                       irq = rentry->res->start;
        }
 
        acpi_dev_free_resource_list(&resource_list);
index 4d6b269..bb3725b 100644 (file)
@@ -861,8 +861,8 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
                break;
 
        case ACPI_RESOURCE_TYPE_ADDRESS64:
-               hyperv_mmio.start = res->data.address64.minimum;
-               hyperv_mmio.end = res->data.address64.maximum;
+               hyperv_mmio.start = res->data.address64.address.minimum;
+               hyperv_mmio.end = res->data.address64.address.maximum;
                break;
        }
 
index 6dbf6fc..e8902f8 100644 (file)
@@ -386,7 +386,7 @@ static int __init pcc_init(void)
        ret = acpi_pcc_probe();
 
        if (ret) {
-               pr_err("ACPI PCC probe failed.\n");
+               pr_debug("ACPI PCC probe failed.\n");
                return -ENODEV;
        }
 
@@ -394,7 +394,7 @@ static int __init pcc_init(void)
                        pcc_mbox_probe, NULL, 0, NULL, 0);
 
        if (!pcc_pdev) {
-               pr_err("Err creating PCC platform bundle\n");
+               pr_debug("Err creating PCC platform bundle\n");
                return -ENODEV;
        }
 
index e07ce5f..b10964e 100644 (file)
@@ -553,8 +553,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
        if (lp->cardtype == PAM_CARD ||
                memaddr == (unsigned short *)0xffe00000) {
                /* PAMs card and Riebl on ST use level 5 autovector */
-               if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
-                           "PAM,Riebl-ST Ethernet", dev)) {
+               if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
+                               "PAM,Riebl-ST Ethernet", dev)) {
                        printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
                        return 0;
                }
@@ -567,8 +567,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
                        printk( "Lance: request for VME interrupt failed\n" );
                        return 0;
                }
-               if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
-                           "Riebl-VME Ethernet", dev)) {
+               if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
+                               dev)) {
                        printk( "Lance: request for irq %u failed\n", irq );
                        return 0;
                }
index 14a1c5c..fa274e0 100644 (file)
@@ -4915,7 +4915,7 @@ static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
 
        RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
-       rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+       rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
 }
 
 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4948,7 +4948,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
        RTL_W8(MaxTxPacketSize, 0x3f);
        RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
        RTL_W8(Config4, RTL_R8(Config4) | 0x01);
-       rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+       rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
 }
 
 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
@@ -4964,7 +4964,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
 {
        rtl_tx_performance_tweak(tp->pci_dev,
-               (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+               PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
 }
 
 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
index 037f74f..12f9e27 100644 (file)
@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
         * better enable it. The long term solution would be to use just a
         * bunch of valid page descriptors, without dependency on ballooning
         */
-       err = alloc_xenballooned_pages(MAX_PENDING_REQS,
-                                      queue->mmap_pages,
-                                      false);
+       err = gnttab_alloc_pages(MAX_PENDING_REQS,
+                                queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
  */
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
-       free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
+       gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
 }
 
 void xenvif_free(struct xenvif *vif)
index c8ce701..7dc2d64 100644 (file)
@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
-                                unsigned long offset, int *head,
-                                struct xenvif_queue *foreign_queue,
-                                grant_ref_t foreign_gref)
+                                unsigned long offset, int *head)
 {
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
        offset &= ~PAGE_MASK;
 
        while (size > 0) {
+               struct xen_page_foreign *foreign;
+
                BUG_ON(offset >= PAGE_SIZE);
                BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
 
@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                copy_gop->flags = GNTCOPY_dest_gref;
                copy_gop->len = bytes;
 
-               if (foreign_queue) {
-                       copy_gop->source.domid = foreign_queue->vif->domid;
-                       copy_gop->source.u.ref = foreign_gref;
+               foreign = xen_page_foreign(page);
+               if (foreign) {
+                       copy_gop->source.domid = foreign->domid;
+                       copy_gop->source.u.ref = foreign->gref;
                        copy_gop->flags |= GNTCOPY_source_gref;
                } else {
                        copy_gop->source.domid = DOMID_SELF;
@@ -405,35 +406,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
        }
 }
 
-/*
- * Find the grant ref for a given frag in a chain of struct ubuf_info's
- * skb: the skb itself
- * i: the frag's number
- * ubuf: a pointer to an element in the chain. It should not be NULL
- *
- * Returns a pointer to the element in the chain where the page were found. If
- * not found, returns NULL.
- * See the definition of callback_struct in common.h for more details about
- * the chain.
- */
-static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
-                                               const int i,
-                                               const struct ubuf_info *ubuf)
-{
-       struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
-
-       do {
-               u16 pending_idx = ubuf->desc;
-
-               if (skb_shinfo(skb)->frags[i].page.p ==
-                   foreign_queue->mmap_pages[pending_idx])
-                       break;
-               ubuf = (struct ubuf_info *) ubuf->ctx;
-       } while (ubuf);
-
-       return ubuf;
-}
-
 /*
  * Prepare an SKB to be transmitted to the frontend.
  *
@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
-       const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-       const struct ubuf_info *const head_ubuf = ubuf;
 
        old_meta_prod = npo->meta_prod;
 
@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        len = skb_tail_pointer(skb) - data;
 
                xenvif_gop_frag_copy(queue, skb, npo,
-                                    virt_to_page(data), len, offset, &head,
-                                    NULL,
-                                    0);
+                                    virt_to_page(data), len, offset, &head);
                data += len;
        }
 
        for (i = 0; i < nr_frags; i++) {
-               /* This variable also signals whether foreign_gref has a real
-                * value or not.
-                */
-               struct xenvif_queue *foreign_queue = NULL;
-               grant_ref_t foreign_gref;
-
-               if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-                       (ubuf->callback == &xenvif_zerocopy_callback)) {
-                       const struct ubuf_info *const startpoint = ubuf;
-
-                       /* Ideally ubuf points to the chain element which
-                        * belongs to this frag. Or if frags were removed from
-                        * the beginning, then shortly before it.
-                        */
-                       ubuf = xenvif_find_gref(skb, i, ubuf);
-
-                       /* Try again from the beginning of the list, if we
-                        * haven't tried from there. This only makes sense in
-                        * the unlikely event of reordering the original frags.
-                        * For injected local pages it's an unnecessary second
-                        * run.
-                        */
-                       if (unlikely(!ubuf) && startpoint != head_ubuf)
-                               ubuf = xenvif_find_gref(skb, i, head_ubuf);
-
-                       if (likely(ubuf)) {
-                               u16 pending_idx = ubuf->desc;
-
-                               foreign_queue = ubuf_to_queue(ubuf);
-                               foreign_gref =
-                                       foreign_queue->pending_tx_info[pending_idx].req.gref;
-                               /* Just a safety measure. If this was the last
-                                * element on the list, the for loop will
-                                * iterate again if a local page were added to
-                                * the end. Using head_ubuf here prevents the
-                                * second search on the chain. Or the original
-                                * frags changed order, but that's less likely.
-                                * In any way, ubuf shouldn't be NULL.
-                                */
-                               ubuf = ubuf->ctx ?
-                                       (struct ubuf_info *) ubuf->ctx :
-                                       head_ubuf;
-                       } else
-                               /* This frag was a local page, added to the
-                                * array after the skb left netback.
-                                */
-                               ubuf = head_ubuf;
-               }
                xenvif_gop_frag_copy(queue, skb, npo,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
-                                    &head,
-                                    foreign_queue,
-                                    foreign_queue ? foreign_gref : UINT_MAX);
+                                    &head);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
                /* Take an extra reference to offset network stack's put_page */
                get_page(queue->mmap_pages[pending_idx]);
        }
-       /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
-        * overlaps with "index", and "mapping" is not set. I think mapping
-        * should be set. If delivered to local stack, it would drop this
-        * skb in sk_filter unless the socket has the right to use it.
-        */
-       skb->pfmemalloc = false;
 }
 
 static int xenvif_get_extras(struct xenvif_queue *queue,
index 88471d3..110fece 100644 (file)
@@ -140,6 +140,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
                        unsigned char busno, unsigned char bus_max,
                        struct list_head *resources, resource_size_t *io_base)
 {
+       struct resource_entry *window;
        struct resource *res;
        struct resource *bus_range;
        struct of_pci_range range;
@@ -225,7 +226,10 @@ int of_pci_get_host_bridge_resources(struct device_node *dev,
 conversion_failed:
        kfree(res);
 parse_failed:
+       resource_list_for_each_entry(window, resources)
+               kfree(window->res);
        pci_free_resource_list(resources);
+       kfree(bus_range);
        return err;
 }
 EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources);
index 7ad59ac..a81cd2a 100644 (file)
@@ -192,8 +192,8 @@ static int __init parport_atari_init(void)
                                          &parport_atari_ops);
                if (!p)
                        return -ENODEV;
-               if (request_irq(IRQ_MFP_BUSY, parport_irq_handler,
-                               IRQ_TYPE_SLOW, p->name, p)) {
+               if (request_irq(IRQ_MFP_BUSY, parport_irq_handler, 0, p->name,
+                               p)) {
                        parport_put_port (p);
                        return -ENODEV;
                }
index 49dd766..d9b64a1 100644 (file)
@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
 EXPORT_SYMBOL(pci_bus_write_config_word);
 EXPORT_SYMBOL(pci_bus_write_config_dword);
 
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+                           int where, int size, u32 *val)
+{
+       void __iomem *addr;
+
+       addr = bus->ops->map_bus(bus, devfn, where);
+       if (!addr) {
+               *val = ~0;
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       }
+
+       if (size == 1)
+               *val = readb(addr);
+       else if (size == 2)
+               *val = readw(addr);
+       else
+               *val = readl(addr);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read);
+
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+                            int where, int size, u32 val)
+{
+       void __iomem *addr;
+
+       addr = bus->ops->map_bus(bus, devfn, where);
+       if (!addr)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (size == 1)
+               writeb(val, addr);
+       else if (size == 2)
+               writew(val, addr);
+       else
+               writel(val, addr);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write);
+
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+                             int where, int size, u32 *val)
+{
+       void __iomem *addr;
+
+       addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+       if (!addr) {
+               *val = ~0;
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       }
+
+       *val = readl(addr);
+
+       if (size <= 2)
+               *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read32);
+
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+                              int where, int size, u32 val)
+{
+       void __iomem *addr;
+       u32 mask, tmp;
+
+       addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+       if (!addr)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       if (size == 4) {
+               writel(val, addr);
+               return PCIBIOS_SUCCESSFUL;
+       } else {
+               mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+       }
+
+       tmp = readl(addr) & mask;
+       tmp |= val << ((where & 0x3) * 8);
+       writel(tmp, addr);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write32);
+
 /**
  * pci_bus_set_ops - Set raw operations of pci bus
  * @bus:       pci bus struct
index 8fb1618..90fa3a7 100644 (file)
 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
                             resource_size_t offset)
 {
-       struct pci_host_bridge_window *window;
+       struct resource_entry *entry;
 
-       window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL);
-       if (!window) {
+       entry = resource_list_create_entry(res, 0);
+       if (!entry) {
                printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
                return;
        }
 
-       window->res = res;
-       window->offset = offset;
-       list_add_tail(&window->list, resources);
+       entry->offset = offset;
+       resource_list_add_tail(entry, resources);
 }
 EXPORT_SYMBOL(pci_add_resource_offset);
 
@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource);
 
 void pci_free_resource_list(struct list_head *resources)
 {
-       struct pci_host_bridge_window *window, *tmp;
-
-       list_for_each_entry_safe(window, tmp, resources, list) {
-               list_del(&window->list);
-               kfree(window);
-       }
+       resource_list_free(resources);
 }
 EXPORT_SYMBOL(pci_free_resource_list);
 
index 0e5f3c9..39b2dbe 100644 (file)
@@ -35,10 +35,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
                             struct resource *res)
 {
        struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
        resource_size_t offset = 0;
 
-       list_for_each_entry(window, &bridge->windows, list) {
+       resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_contains(window->res, res)) {
                        offset = window->offset;
                        break;
@@ -60,10 +60,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
                             struct pci_bus_region *region)
 {
        struct pci_host_bridge *bridge = find_pci_host_bridge(bus);
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
        resource_size_t offset = 0;
 
-       list_for_each_entry(window, &bridge->windows, list) {
+       resource_list_for_each_entry(window, &bridge->windows) {
                struct pci_bus_region bus_region;
 
                if (resource_type(res) != resource_type(window->res))
index c4b6568..7b892a9 100644 (file)
@@ -102,4 +102,8 @@ config PCI_LAYERSCAPE
        help
          Say Y here if you want PCIe controller support on Layerscape SoCs.
 
+config PCI_VERSATILE
+       bool "ARM Versatile PB PCI controller"
+       depends on ARCH_VERSATILE
+
 endmenu
index 44c2699..e61d91c 100644 (file)
@@ -12,3 +12,4 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
 obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
 obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
index 6eb1aa7..ba46e58 100644 (file)
@@ -76,55 +76,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
        .map_bus        = gen_pci_map_cfg_bus_ecam,
 };
 
-static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
-                               int where, int size, u32 *val)
-{
-       void __iomem *addr;
-       struct pci_sys_data *sys = bus->sysdata;
-       struct gen_pci *pci = sys->private_data;
-
-       addr = pci->cfg.ops->map_bus(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               *val = readb(addr);
-               break;
-       case 2:
-               *val = readw(addr);
-               break;
-       default:
-               *val = readl(addr);
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 val)
-{
-       void __iomem *addr;
-       struct pci_sys_data *sys = bus->sysdata;
-       struct gen_pci *pci = sys->private_data;
-
-       addr = pci->cfg.ops->map_bus(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               writeb(val, addr);
-               break;
-       case 2:
-               writew(val, addr);
-               break;
-       default:
-               writel(val, addr);
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
 static struct pci_ops gen_pci_ops = {
-       .read   = gen_pci_config_read,
-       .write  = gen_pci_config_write,
+       .read   = pci_generic_config_read,
+       .write  = pci_generic_config_write,
 };
 
 static const struct of_device_id gen_pci_of_match[] = {
@@ -149,14 +103,14 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
        struct device *dev = pci->host.dev.parent;
        struct device_node *np = dev->of_node;
        resource_size_t iobase;
-       struct pci_host_bridge_window *win;
+       struct resource_entry *win;
 
        err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
                                               &iobase);
        if (err)
                return err;
 
-       list_for_each_entry(win, &pci->resources, list) {
+       resource_list_for_each_entry(win, &pci->resources) {
                struct resource *parent, *res = win->res;
 
                switch (resource_type(res)) {
@@ -287,6 +241,7 @@ static int gen_pci_probe(struct platform_device *pdev)
 
        of_id = of_match_node(gen_pci_of_match, np);
        pci->cfg.ops = of_id->data;
+       gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
        pci->host.dev.parent = dev;
        INIT_LIST_HEAD(&pci->host.windows);
        INIT_LIST_HEAD(&pci->resources);
index 78f79e3..75333b0 100644 (file)
@@ -119,7 +119,7 @@ static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
        struct pcie_port *pp = &ks_pcie->pp;
        struct irq_chip *chip = irq_desc_get_chip(desc);
 
-       dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+       dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
 
        /*
         * The chained irq handler installation would have replaced normal
@@ -197,7 +197,7 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
         */
        for (temp = 0; temp < max_host_irqs; temp++) {
                host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
-               if (host_irqs[temp] < 0)
+               if (!host_irqs[temp])
                        break;
        }
        if (temp) {
index 6697b1a..68c9e5e 100644 (file)
@@ -167,7 +167,6 @@ MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
 static struct platform_driver ls_pcie_driver = {
        .driver = {
                .name = "layerscape-pcie",
-               .owner = THIS_MODULE,
                .of_match_table = ls_pcie_of_match,
        },
 };
index 1dd7595..1309cfb 100644 (file)
@@ -101,9 +101,7 @@ struct mvebu_pcie {
        struct mvebu_pcie_port *ports;
        struct msi_controller *msi;
        struct resource io;
-       char io_name[30];
        struct resource realio;
-       char mem_name[30];
        struct resource mem;
        struct resource busn;
        int nports;
@@ -723,18 +721,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
 {
        struct mvebu_pcie *pcie = sys_to_pcie(sys);
        int i;
-       int domain = 0;
 
-#ifdef CONFIG_PCI_DOMAINS
-       domain = sys->domain;
-#endif
-
-       snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x",
-                domain);
-       pcie->mem.name = pcie->mem_name;
-
-       snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain);
-       pcie->realio.name = pcie->io_name;
+       pcie->mem.name = "PCI MEM";
+       pcie->realio.name = "PCI I/O";
 
        if (request_resource(&iomem_resource, &pcie->mem))
                return 0;
index d9c042f..dd6b84e 100644 (file)
@@ -131,52 +131,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
        return priv->reg + (slot >> 1) * 0x100 + where;
 }
 
-static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
-                               int where, int size, u32 *val)
-{
-       void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
-
-       if (!reg)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (size) {
-       case 1:
-               *val = ioread8(reg);
-               break;
-       case 2:
-               *val = ioread16(reg);
-               break;
-       default:
-               *val = ioread32(reg);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 val)
-{
-       void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
-
-       if (!reg)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       switch (size) {
-       case 1:
-               iowrite8(val, reg);
-               break;
-       case 2:
-               iowrite16(val, reg);
-               break;
-       default:
-               iowrite32(val, reg);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
 /* PCI interrupt mapping */
 static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -325,8 +279,9 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
 }
 
 static struct pci_ops rcar_pci_ops = {
-       .read   = rcar_pci_read_config,
-       .write  = rcar_pci_write_config,
+       .map_bus = rcar_pci_cfg_base,
+       .read   = pci_generic_config_read,
+       .write  = pci_generic_config_write,
 };
 
 static int rcar_pci_probe(struct platform_device *pdev)
index a800ae9..00e9272 100644 (file)
@@ -480,59 +480,10 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
        return addr;
 }
 
-static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
-                               int where, int size, u32 *value)
-{
-       void __iomem *addr;
-
-       addr = tegra_pcie_conf_address(bus, devfn, where);
-       if (!addr) {
-               *value = 0xffffffff;
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       }
-
-       *value = readl(addr);
-
-       if (size == 1)
-               *value = (*value >> (8 * (where & 3))) & 0xff;
-       else if (size == 2)
-               *value = (*value >> (8 * (where & 3))) & 0xffff;
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
-                                int where, int size, u32 value)
-{
-       void __iomem *addr;
-       u32 mask, tmp;
-
-       addr = tegra_pcie_conf_address(bus, devfn, where);
-       if (!addr)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       if (size == 4) {
-               writel(value, addr);
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       if (size == 2)
-               mask = ~(0xffff << ((where & 0x3) * 8));
-       else if (size == 1)
-               mask = ~(0xff << ((where & 0x3) * 8));
-       else
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       tmp = readl(addr) & mask;
-       tmp |= value << ((where & 0x3) * 8);
-       writel(tmp, addr);
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
 static struct pci_ops tegra_pcie_ops = {
-       .read = tegra_pcie_read_conf,
-       .write = tegra_pcie_write_conf,
+       .map_bus = tegra_pcie_conf_address,
+       .read = pci_generic_config_read32,
+       .write = pci_generic_config_write32,
 };
 
 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
@@ -625,19 +576,6 @@ static void tegra_pcie_port_free(struct tegra_pcie_port *port)
        devm_kfree(pcie->dev, port);
 }
 
-static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
-{
-       u16 reg;
-
-       if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
-               pci_read_config_word(dev, PCI_COMMAND, &reg);
-               reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
-                       PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
-               pci_write_config_word(dev, PCI_COMMAND, reg);
-       }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
-
 /* Tegra PCIE root complex wrongly reports device class */
 static void tegra_pcie_fixup_class(struct pci_dev *dev)
 {
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
new file mode 100644 (file)
index 0000000..1ec694a
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2004 Koninklijke Philips Electronics NV
+ *
+ * Conversion to platform driver and DT:
+ * Copyright 2014 Linaro Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * 14/04/2005 Initial version, colin.king@philips.com
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+static void __iomem *versatile_pci_base;
+static void __iomem *versatile_cfg_base[2];
+
+#define PCI_IMAP(m)            (versatile_pci_base + ((m) * 4))
+#define PCI_SMAP(m)            (versatile_pci_base + 0x14 + ((m) * 4))
+#define PCI_SELFID             (versatile_pci_base + 0xc)
+
+#define VP_PCI_DEVICE_ID               0x030010ee
+#define VP_PCI_CLASS_ID                        0x0b400000
+
+static u32 pci_slot_ignore;
+
+static int __init versatile_pci_slot_ignore(char *str)
+{
+       int retval;
+       int slot;
+
+       while ((retval = get_option(&str, &slot))) {
+               if ((slot < 0) || (slot > 31))
+                       pr_err("Illegal slot value: %d\n", slot);
+               else
+                       pci_slot_ignore |= (1 << slot);
+       }
+       return 1;
+}
+__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
+
+
+static void __iomem *versatile_map_bus(struct pci_bus *bus,
+                                      unsigned int devfn, int offset)
+{
+       unsigned int busnr = bus->number;
+
+       if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
+               return NULL;
+
+       return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
+}
+
+static struct pci_ops pci_versatile_ops = {
+       .map_bus = versatile_map_bus,
+       .read   = pci_generic_config_read32,
+       .write  = pci_generic_config_write,
+};
+
+static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
+                                                    struct list_head *res)
+{
+       int err, mem = 1, res_valid = 0;
+       struct device_node *np = dev->of_node;
+       resource_size_t iobase;
+       struct resource_entry *win;
+
+       err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+       if (err)
+               return err;
+
+       resource_list_for_each_entry(win, res, list) {
+               struct resource *parent, *res = win->res;
+
+               switch (resource_type(res)) {
+               case IORESOURCE_IO:
+                       parent = &ioport_resource;
+                       err = pci_remap_iospace(res, iobase);
+                       if (err) {
+                               dev_warn(dev, "error %d: failed to map resource %pR\n",
+                                        err, res);
+                               continue;
+                       }
+                       break;
+               case IORESOURCE_MEM:
+                       parent = &iomem_resource;
+                       res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+
+                       writel(res->start >> 28, PCI_IMAP(mem));
+                       writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
+                       mem++;
+
+                       break;
+               case IORESOURCE_BUS:
+               default:
+                       continue;
+               }
+
+               err = devm_request_resource(dev, parent, res);
+               if (err)
+                       goto out_release_res;
+       }
+
+       if (!res_valid) {
+               dev_err(dev, "non-prefetchable memory resource required\n");
+               err = -EINVAL;
+               goto out_release_res;
+       }
+
+       return 0;
+
+out_release_res:
+       pci_free_resource_list(res);
+       return err;
+}
+
+/* Unused, temporary to satisfy ARM arch code */
+struct pci_sys_data sys;
+
+static int versatile_pci_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int ret, i, myslot = -1;
+       u32 val;
+       void __iomem *local_pci_cfg_base;
+       struct pci_bus *bus;
+       LIST_HEAD(pci_res);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+       versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return -ENODEV;
+       versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       if (!res)
+               return -ENODEV;
+       versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
+
+       ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+       if (ret)
+               return ret;
+
+       /*
+        * We need to discover the PCI core first to configure itself
+        * before the main PCI probing is performed
+        */
+       for (i = 0; i < 32; i++) {
+               if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
+                   (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
+                       myslot = i;
+                       break;
+               }
+       }
+       if (myslot == -1) {
+               dev_err(&pdev->dev, "Cannot find PCI core!\n");
+               return -EIO;
+       }
+       /*
+        * Do not to map Versatile FPGA PCI device into memory space
+        */
+       pci_slot_ignore |= (1 << myslot);
+
+       dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+
+       writel(myslot, PCI_SELFID);
+       local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
+
+       val = readl(local_pci_cfg_base + PCI_COMMAND);
+       val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+       writel(val, local_pci_cfg_base + PCI_COMMAND);
+
+       /*
+        * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+        */
+       writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+       writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+       writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+       /*
+        * For many years the kernel and QEMU were symbiotically buggy
+        * in that they both assumed the same broken IRQ mapping.
+        * QEMU therefore attempts to auto-detect old broken kernels
+        * so that they still work on newer QEMU as they did on old
+        * QEMU. Since we now use the correct (ie matching-hardware)
+        * IRQ mapping we write a definitely different value to a
+        * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+        * real hardware behaviour and it need not be backwards
+        * compatible for us. This write is harmless on real hardware.
+        */
+       writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
+
+       pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
+       pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
+
+       bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
+       if (!bus)
+               return -ENOMEM;
+
+       pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+       pci_assign_unassigned_bus_resources(bus);
+
+       return 0;
+}
+
+static const struct of_device_id versatile_pci_of_match[] = {
+       { .compatible = "arm,versatile-pci", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
+
+static struct platform_driver versatile_pci_driver = {
+       .driver = {
+               .name = "versatile-pci",
+               .of_match_table = versatile_pci_of_match,
+       },
+       .probe = versatile_pci_probe,
+};
+module_platform_driver(versatile_pci_driver);
+
+MODULE_DESCRIPTION("Versatile PCI driver");
+MODULE_LICENSE("GPL v2");
index b1d0596..aab5547 100644 (file)
@@ -16,7 +16,7 @@
  * GNU General Public License for more details.
  *
  */
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
@@ -74,92 +74,6 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
        return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
 }
 
-/* PCIe Configuration Out/In */
-static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
-{
-       writel(val, addr + offset);
-}
-
-static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
-{
-       u32 val32 = readl(addr + (offset & ~0x3));
-
-       switch (offset & 0x3) {
-       case 2:
-               val32 &= ~0xFFFF0000;
-               val32 |= (u32)val << 16;
-               break;
-       case 0:
-       default:
-               val32 &= ~0xFFFF;
-               val32 |= val;
-               break;
-       }
-       writel(val32, addr + (offset & ~0x3));
-}
-
-static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
-{
-       u32 val32 = readl(addr + (offset & ~0x3));
-
-       switch (offset & 0x3) {
-       case 0:
-               val32 &= ~0xFF;
-               val32 |= val;
-               break;
-       case 1:
-               val32 &= ~0xFF00;
-               val32 |= (u32)val << 8;
-               break;
-       case 2:
-               val32 &= ~0xFF0000;
-               val32 |= (u32)val << 16;
-               break;
-       case 3:
-       default:
-               val32 &= ~0xFF000000;
-               val32 |= (u32)val << 24;
-               break;
-       }
-       writel(val32, addr + (offset & ~0x3));
-}
-
-static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
-{
-       *val = readl(addr + offset);
-}
-
-static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
-{
-       *val = readl(addr + (offset & ~0x3));
-
-       switch (offset & 0x3) {
-       case 2:
-               *val >>= 16;
-               break;
-       }
-
-       *val &= 0xFFFF;
-}
-
-static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
-{
-       *val = readl(addr + (offset & ~0x3));
-
-       switch (offset & 0x3) {
-       case 3:
-               *val = *val >> 24;
-               break;
-       case 2:
-               *val = *val >> 16;
-               break;
-       case 1:
-               *val = *val >> 8;
-               break;
-       }
-       *val &= 0xFF;
-}
-
 /*
  * When the address bit [17:16] is 2'b01, the Configuration access will be
  * treated as Type 1 and it will be forwarded to external PCIe device.
@@ -213,69 +127,23 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
        return false;
 }
 
-static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
-                                 int offset, int len, u32 *val)
-{
-       struct xgene_pcie_port *port = bus->sysdata;
-       void __iomem *addr;
-
-       if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       if (xgene_pcie_hide_rc_bars(bus, offset)) {
-               *val = 0;
-               return PCIBIOS_SUCCESSFUL;
-       }
-
-       xgene_pcie_set_rtdid_reg(bus, devfn);
-       addr = xgene_pcie_get_cfg_base(bus);
-       switch (len) {
-       case 1:
-               xgene_pcie_cfg_in8(addr, offset, val);
-               break;
-       case 2:
-               xgene_pcie_cfg_in16(addr, offset, val);
-               break;
-       default:
-               xgene_pcie_cfg_in32(addr, offset, val);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
-                                  int offset, int len, u32 val)
+static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+                             int offset)
 {
        struct xgene_pcie_port *port = bus->sysdata;
-       void __iomem *addr;
 
-       if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       if (xgene_pcie_hide_rc_bars(bus, offset))
-               return PCIBIOS_SUCCESSFUL;
+       if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
+           xgene_pcie_hide_rc_bars(bus, offset))
+               return NULL;
 
        xgene_pcie_set_rtdid_reg(bus, devfn);
-       addr = xgene_pcie_get_cfg_base(bus);
-       switch (len) {
-       case 1:
-               xgene_pcie_cfg_out8(addr, offset, (u8)val);
-               break;
-       case 2:
-               xgene_pcie_cfg_out16(addr, offset, (u16)val);
-               break;
-       default:
-               xgene_pcie_cfg_out32(addr, offset, val);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
+       return xgene_pcie_get_cfg_base(bus);
 }
 
 static struct pci_ops xgene_pcie_ops = {
-       .read = xgene_pcie_read_config,
-       .write = xgene_pcie_write_config
+       .map_bus = xgene_pcie_map_bus,
+       .read = pci_generic_config_read32,
+       .write = pci_generic_config_write32,
 };
 
 static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
@@ -401,11 +269,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
                                 struct list_head *res,
                                 resource_size_t io_base)
 {
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
        struct device *dev = port->dev;
        int ret;
 
-       list_for_each_entry(window, res, list) {
+       resource_list_for_each_entry(window, res) {
                struct resource *res = window->res;
                u64 restype = resource_type(res);
 
index 17ca986..1f4ea6f 100644 (file)
@@ -511,9 +511,6 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
        dw_pci.private_data = (void **)&pp;
 
        pci_common_init_dev(pp->dev, &dw_pci);
-#ifdef CONFIG_PCI_DOMAINS
-       dw_pci.domain++;
-#endif
 
        return 0;
 }
index 748786c..c57bd0a 100644 (file)
@@ -397,9 +397,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie)
 #endif
 
        pci_common_init_dev(&pdev->dev, &rcar_pci);
-#ifdef CONFIG_PCI_DOMAINS
-       rcar_pci.domain++;
-#endif
 }
 
 static int phy_wait_for_ack(struct rcar_pcie *pcie)
@@ -757,7 +754,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
                goto err_map_reg;
 
        i = irq_of_parse_and_map(pdev->dev.of_node, 0);
-       if (i < 0) {
+       if (!i) {
                dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
                err = -ENOENT;
                goto err_map_reg;
@@ -765,7 +762,7 @@ static int rcar_pcie_get_resources(struct platform_device *pdev,
        pcie->msi.irq1 = i;
 
        i = irq_of_parse_and_map(pdev->dev.of_node, 1);
-       if (i < 0) {
+       if (!i) {
                dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
                err = -ENOENT;
                goto err_map_reg;
index ef3ebaf..f1a06a0 100644 (file)
@@ -148,10 +148,10 @@ static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
  */
 static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
 {
-       u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+       unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
 
        if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
-               dev_dbg(port->dev, "Requester ID %d\n",
+               dev_dbg(port->dev, "Requester ID %lu\n",
                        val & XILINX_PCIE_RPEFR_REQ_ID);
                pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
                           XILINX_PCIE_REG_RPEFR);
@@ -189,7 +189,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
 }
 
 /**
- * xilinx_pcie_config_base - Get configuration base
+ * xilinx_pcie_map_bus - Get configuration base
  * @bus: PCI Bus structure
  * @devfn: Device/function
  * @where: Offset from base
@@ -197,96 +197,26 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
  * Return: Base address of the configuration space needed to be
  *        accessed.
  */
-static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
-                                            unsigned int devfn, int where)
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+                                        unsigned int devfn, int where)
 {
        struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
        int relbus;
 
+       if (!xilinx_pcie_valid_device(bus, devfn))
+               return NULL;
+
        relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
                 (devfn << ECAM_DEV_NUM_SHIFT);
 
        return port->reg_base + relbus + where;
 }
 
-/**
- * xilinx_pcie_read_config - Read configuration space
- * @bus: PCI Bus structure
- * @devfn: Device/function
- * @where: Offset from base
- * @size: Byte/word/dword
- * @val: Value to be read
- *
- * Return: PCIBIOS_SUCCESSFUL on success
- *        PCIBIOS_DEVICE_NOT_FOUND on failure
- */
-static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
-                                  int where, int size, u32 *val)
-{
-       void __iomem *addr;
-
-       if (!xilinx_pcie_valid_device(bus, devfn)) {
-               *val = 0xFFFFFFFF;
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       }
-
-       addr = xilinx_pcie_config_base(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               *val = readb(addr);
-               break;
-       case 2:
-               *val = readw(addr);
-               break;
-       default:
-               *val = readl(addr);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
-/**
- * xilinx_pcie_write_config - Write configuration space
- * @bus: PCI Bus structure
- * @devfn: Device/function
- * @where: Offset from base
- * @size: Byte/word/dword
- * @val: Value to be written to device
- *
- * Return: PCIBIOS_SUCCESSFUL on success
- *        PCIBIOS_DEVICE_NOT_FOUND on failure
- */
-static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
-                                   int where, int size, u32 val)
-{
-       void __iomem *addr;
-
-       if (!xilinx_pcie_valid_device(bus, devfn))
-               return PCIBIOS_DEVICE_NOT_FOUND;
-
-       addr = xilinx_pcie_config_base(bus, devfn, where);
-
-       switch (size) {
-       case 1:
-               writeb(val, addr);
-               break;
-       case 2:
-               writew(val, addr);
-               break;
-       default:
-               writel(val, addr);
-               break;
-       }
-
-       return PCIBIOS_SUCCESSFUL;
-}
-
 /* PCIe operations */
 static struct pci_ops xilinx_pcie_ops = {
-       .read  = xilinx_pcie_read_config,
-       .write = xilinx_pcie_write_config,
+       .map_bus = xilinx_pcie_map_bus,
+       .read   = pci_generic_config_read,
+       .write  = pci_generic_config_write,
 };
 
 /* MSI functions */
@@ -737,7 +667,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
        resource_size_t offset;
        struct of_pci_range_parser parser;
        struct of_pci_range range;
-       struct pci_host_bridge_window *win;
+       struct resource_entry *win;
        int err = 0, mem_resno = 0;
 
        /* Get the ranges */
@@ -807,7 +737,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
 
 free_resources:
        release_child_resources(&iomem_resource);
-       list_for_each_entry(win, &port->resources, list)
+       resource_list_for_each_entry(win, &port->resources)
                devm_kfree(dev, win->res);
        pci_free_resource_list(&port->resources);
 
index a5a7fd8..46db293 100644 (file)
@@ -214,8 +214,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 
        kfree(slot->hotplug_slot->info);
        kfree(slot->hotplug_slot);
-       if (slot->dev)
-               pci_dev_put(slot->dev);
+       pci_dev_put(slot->dev);
        kfree(slot);
 }
 
index ff32e85..f052e95 100644 (file)
@@ -532,8 +532,6 @@ static void interrupt_event_handler(struct work_struct *work)
                pciehp_green_led_off(p_slot);
                break;
        case INT_PRESENCE_ON:
-               if (!HP_SUPR_RM(ctrl))
-                       break;
                ctrl_dbg(ctrl, "Surprise Insertion\n");
                handle_surprise_event(p_slot);
                break;
index bada209..c32fb78 100644 (file)
@@ -475,7 +475,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
        struct slot *slot = bss_hotplug_slot->private;
        struct pci_dev *dev, *temp;
        int rc;
-       acpi_owner_id ssdt_id = 0;
+       acpi_handle ssdt_hdl = NULL;
 
        /* Acquire update access to the bus */
        mutex_lock(&sn_hotplug_mutex);
@@ -522,7 +522,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
                        if (ACPI_SUCCESS(ret) &&
                            (adr>>16) == (slot->device_num + 1)) {
                                /* retain the owner id */
-                               acpi_get_id(chandle, &ssdt_id);
+                               ssdt_hdl = chandle;
 
                                ret = acpi_bus_get_device(chandle,
                                                          &device);
@@ -547,12 +547,13 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
        pci_unlock_rescan_remove();
 
        /* Remove the SSDT for the slot from the ACPI namespace */
-       if (SN_ACPI_BASE_SUPPORT() && ssdt_id) {
+       if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
                acpi_status ret;
-               ret = acpi_unload_table_id(ssdt_id);
+               ret = acpi_unload_parent_table(ssdt_hdl);
                if (ACPI_FAILURE(ret)) {
-                       printk(KERN_ERR "%s: acpi_unload_table_id failed (0x%x) for id %d\n",
-                              __func__, ret, ssdt_id);
+                       acpi_handle_err(ssdt_hdl,
+                                       "%s: acpi_unload_parent_table failed (0x%x)\n",
+                                       __func__, ret);
                        /* try to continue on */
                }
        }
index fd60806..c3e7dfc 100644 (file)
@@ -694,11 +694,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
 {
        resource_size_t phys_addr;
        u32 table_offset;
+       unsigned long flags;
        u8 bir;
 
        pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
                              &table_offset);
        bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+       flags = pci_resource_flags(dev, bir);
+       if (!flags || (flags & IORESOURCE_UNSET))
+               return NULL;
+
        table_offset &= PCI_MSIX_TABLE_OFFSET;
        phys_addr = pci_resource_start(dev, bir) + table_offset;
 
index 3542150..4890639 100644 (file)
@@ -501,12 +501,29 @@ static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
        return 0;
 }
 
+static bool acpi_pci_need_resume(struct pci_dev *dev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+       if (!adev || !acpi_device_power_manageable(adev))
+               return false;
+
+       if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+               return true;
+
+       if (acpi_target_system_state() == ACPI_STATE_S0)
+               return false;
+
+       return !!adev->power.flags.dsw_present;
+}
+
 static struct pci_platform_pm_ops acpi_pci_platform_pm = {
        .is_manageable = acpi_pci_power_manageable,
        .set_state = acpi_pci_set_power_state,
        .choose_state = acpi_pci_choose_state,
        .sleep_wake = acpi_pci_sleep_wake,
        .run_wake = acpi_pci_run_wake,
+       .need_resume = acpi_pci_need_resume,
 };
 
 void acpi_pci_add_bus(struct pci_bus *bus)
index 887e6bd..3cb2210 100644 (file)
@@ -653,7 +653,6 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
 static int pci_pm_prepare(struct device *dev)
 {
        struct device_driver *drv = dev->driver;
-       int error = 0;
 
        /*
         * Devices having power.ignore_children set may still be necessary for
@@ -662,10 +661,12 @@ static int pci_pm_prepare(struct device *dev)
        if (dev->power.ignore_children)
                pm_runtime_resume(dev);
 
-       if (drv && drv->pm && drv->pm->prepare)
-               error = drv->pm->prepare(dev);
-
-       return error;
+       if (drv && drv->pm && drv->pm->prepare) {
+               int error = drv->pm->prepare(dev);
+               if (error)
+                       return error;
+       }
+       return pci_dev_keep_suspended(to_pci_dev(dev));
 }
 
 
@@ -1383,7 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
        if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
                return -ENOMEM;
 
-       if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
+       if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
                           pdev->vendor, pdev->device,
                           pdev->subsystem_vendor, pdev->subsystem_device,
                           (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
index e9d4fd8..81f06e8 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
 #include <linux/pci.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
@@ -521,6 +523,11 @@ static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
                        pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 }
 
+static inline bool platform_pci_need_resume(struct pci_dev *dev)
+{
+       return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
+}
+
 /**
  * pci_raw_set_power_state - Use PCI PM registers to set the power state of
  *                           given PCI device
@@ -1999,6 +2006,27 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
 
+/**
+ * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
+ * @pci_dev: Device to check.
+ *
+ * Return 'true' if the device is runtime-suspended, it doesn't have to be
+ * reconfigured due to wakeup settings difference between system and runtime
+ * suspend and the current power state of it is suitable for the upcoming
+ * (system) transition.
+ */
+bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
+{
+       struct device *dev = &pci_dev->dev;
+
+       if (!pm_runtime_suspended(dev)
+           || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+           || platform_pci_need_resume(pci_dev))
+               return false;
+
+       return pci_target_state(pci_dev) == pci_dev->current_state;
+}
+
 void pci_config_pm_runtime_get(struct pci_dev *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -3197,7 +3225,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 {
        u16 csr;
 
-       if (!dev->pm_cap)
+       if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
                return -ENOTTY;
 
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
@@ -4471,6 +4499,53 @@ int pci_get_new_domain_nr(void)
 {
        return atomic_inc_return(&__domain_nr);
 }
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+       static int use_dt_domains = -1;
+       int domain = of_get_pci_domain_nr(parent->of_node);
+
+       /*
+        * Check DT domain and use_dt_domains values.
+        *
+        * If DT domain property is valid (domain >= 0) and
+        * use_dt_domains != 0, the DT assignment is valid since this means
+        * we have not previously allocated a domain number by using
+        * pci_get_new_domain_nr(); we should also update use_dt_domains to
+        * 1, to indicate that we have just assigned a domain number from
+        * DT.
+        *
+        * If DT domain property value is not valid (ie domain < 0), and we
+        * have not previously assigned a domain number from DT
+        * (use_dt_domains != 1) we should assign a domain number by
+        * using the:
+        *
+        * pci_get_new_domain_nr()
+        *
+        * API and update the use_dt_domains value to keep track of method we
+        * are using to assign domain numbers (use_dt_domains = 0).
+        *
+        * All other combinations imply we have a platform that is trying
+        * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
+        * which is a recipe for domain mishandling and it is prevented by
+        * invalidating the domain value (domain = -1) and printing a
+        * corresponding error.
+        */
+       if (domain >= 0 && use_dt_domains) {
+               use_dt_domains = 1;
+       } else if (domain < 0 && use_dt_domains != 1) {
+               use_dt_domains = 0;
+               domain = pci_get_new_domain_nr();
+       } else {
+               dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
+                       parent->of_node->full_name);
+               domain = -1;
+       }
+
+       bus->domain_nr = domain;
+}
+#endif
 #endif
 
 /**
index d54632a..4091f82 100644 (file)
@@ -50,6 +50,10 @@ int pci_probe_reset_function(struct pci_dev *dev);
  *             for given device (the device's wake-up capability has to be
  *             enabled by @sleep_wake for this feature to work)
  *
+ * @need_resume: returns 'true' if the given device (which is currently
+ *             suspended) needs to be resumed to be configured for system
+ *             wakeup.
+ *
  * If given platform is generally capable of power managing PCI devices, all of
  * these callbacks are mandatory.
  */
@@ -59,6 +63,7 @@ struct pci_platform_pm_ops {
        pci_power_t (*choose_state)(struct pci_dev *dev);
        int (*sleep_wake)(struct pci_dev *dev, bool enable);
        int (*run_wake)(struct pci_dev *dev, bool enable);
+       bool (*need_resume)(struct pci_dev *dev);
 };
 
 int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
@@ -67,6 +72,7 @@ void pci_power_up(struct pci_dev *dev);
 void pci_disable_enabled_device(struct pci_dev *dev);
 int pci_finish_runtime_suspend(struct pci_dev *dev);
 int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+bool pci_dev_keep_suspended(struct pci_dev *dev);
 void pci_config_pm_runtime_get(struct pci_dev *dev);
 void pci_config_pm_runtime_put(struct pci_dev *dev);
 void pci_pm_init(struct pci_dev *dev);
index e1e7026..820740a 100644 (file)
@@ -859,7 +859,10 @@ static ssize_t link_state_store(struct device *dev,
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pcie_link_state *link, *root = pdev->link_state->root;
-       u32 val = buf[0] - '0', state = 0;
+       u32 val, state = 0;
+
+       if (kstrtouint(buf, 10, &val))
+               return -EINVAL;
 
        if (aspm_disabled)
                return -EPERM;
@@ -900,15 +903,14 @@ static ssize_t clk_ctl_store(struct device *dev,
                size_t n)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       int state;
+       bool state;
 
-       if (n < 1)
+       if (strtobool(buf, &state))
                return -EINVAL;
-       state = buf[0]-'0';
 
        down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
-       pcie_set_clkpm_nocheck(pdev->link_state, !!state);
+       pcie_set_clkpm_nocheck(pdev->link_state, state);
        mutex_unlock(&aspm_lock);
        up_read(&pci_bus_sem);
 
index 23212f8..8d2f400 100644 (file)
@@ -1895,7 +1895,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
        int error;
        struct pci_host_bridge *bridge;
        struct pci_bus *b, *b2;
-       struct pci_host_bridge_window *window, *n;
+       struct resource_entry *window, *n;
        struct resource *res;
        resource_size_t offset;
        char bus_addr[64];
@@ -1959,8 +1959,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
                printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
 
        /* Add initial resources to the bus */
-       list_for_each_entry_safe(window, n, resources, list) {
-               list_move_tail(&window->list, &bridge->windows);
+       resource_list_for_each_entry_safe(window, n, resources) {
+               list_move_tail(&window->node, &bridge->windows);
                res = window->res;
                offset = window->offset;
                if (res->flags & IORESOURCE_BUS)
@@ -2060,12 +2060,12 @@ void pci_bus_release_busn_res(struct pci_bus *b)
 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
                struct pci_ops *ops, void *sysdata, struct list_head *resources)
 {
-       struct pci_host_bridge_window *window;
+       struct resource_entry *window;
        bool found = false;
        struct pci_bus *b;
        int max;
 
-       list_for_each_entry(window, resources, list)
+       resource_list_for_each_entry(window, resources)
                if (window->res->flags & IORESOURCE_BUS) {
                        found = true;
                        break;
index 903d507..85f247e 100644 (file)
@@ -3076,6 +3076,27 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
 
+static void quirk_no_pm_reset(struct pci_dev *dev)
+{
+       /*
+        * We can't do a bus reset on root bus devices, but an ineffective
+        * PM reset may be better than nothing.
+        */
+       if (!pci_is_root_bus(dev->bus))
+               dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
+}
+
+/*
+ * Some AMD/ATI GPUS (HD8570 - Oland) report that a D3hot->D0 transition
+ * causes a reset (i.e., they advertise NoSoftRst-).  This transition seems
+ * to have no effect on the device: it retains the framebuffer contents and
+ * monitor sync.  Advertising this support makes other layers, like VFIO,
+ * assume pci_reset_function() is viable for this device.  Mark it as
+ * unavailable to skip it when testing reset methods.
+ */
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+                              PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3575,6 +3596,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
                         PCI_DEVICE_ID_JMICRON_JMB388_ESD,
                         quirk_dma_func1_alias);
 
+/*
+ * Some devices DMA with the wrong devfn, not just the wrong function.
+ * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
+ * the alias is "fixed" and independent of the device devfn.
+ *
+ * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
+ * processor.  To software, this appears as a PCIe-to-PCI/X bridge with a
+ * single device on the secondary bus.  In reality, the single exposed
+ * device at 0e.0 is the Address Translation Unit (ATU) of the controller
+ * that provides a bridge to the internal bus of the I/O processor.  The
+ * controller supports private devices, which can be hidden from PCI config
+ * space.  In the case of the Adaptec 3405, a private device at 01.0
+ * appears to be the DMA engine, which therefore needs to become a DMA
+ * alias for the device.
+ */
+static const struct pci_device_id fixed_dma_alias_tbl[] = {
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+                        PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
+         .driver_data = PCI_DEVFN(1, 0) },
+       { 0 }
+};
+
+static void quirk_fixed_dma_alias(struct pci_dev *dev)
+{
+       const struct pci_device_id *id;
+
+       id = pci_match_id(fixed_dma_alias_tbl, dev);
+       if (id) {
+               dev->dma_alias_devfn = id->driver_data;
+               dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+               dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+                        PCI_SLOT(dev->dma_alias_devfn),
+                        PCI_FUNC(dev->dma_alias_devfn));
+       }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
+
 /*
  * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
  * using the wrong DMA alias for the device.  Some of these devices can be
@@ -3678,6 +3737,9 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = {
        0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
        /* Patsburg (X79) PCH */
        0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
+       /* Wellsburg (X99) PCH */
+       0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
+       0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
 };
 
 static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
@@ -3761,6 +3823,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+       { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
+       { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
        { 0 }
 };
 
index f955edb..eb0ad53 100644 (file)
@@ -71,6 +71,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
 {
        void __iomem *image;
        int last_image;
+       unsigned length;
 
        image = rom;
        do {
@@ -93,9 +94,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
                if (readb(pds + 3) != 'R')
                        break;
                last_image = readb(pds + 21) & 0x80;
-               /* this length is reliable */
-               image += readw(pds + 16) * 512;
-       } while (!last_image);
+               length = readw(pds + 16);
+               image += length * 512;
+       } while (length && !last_image);
 
        /* never return a size larger than the PCI resource window */
        /* there are known ROMs that get the size wrong */
index 66977eb..ff0356f 100644 (file)
@@ -180,20 +180,21 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
        struct pnp_dev *dev = data;
        struct acpi_resource_dma *dma;
        struct acpi_resource_vendor_typed *vendor_typed;
-       struct resource r = {0};
+       struct resource_win win = {{0}, 0};
+       struct resource *r = &win.res;
        int i, flags;
 
-       if (acpi_dev_resource_address_space(res, &r)
-           || acpi_dev_resource_ext_address_space(res, &r)) {
-               pnp_add_resource(dev, &r);
+       if (acpi_dev_resource_address_space(res, &win)
+           || acpi_dev_resource_ext_address_space(res, &win)) {
+               pnp_add_resource(dev, &win.res);
                return AE_OK;
        }
 
-       r.flags = 0;
-       if (acpi_dev_resource_interrupt(res, 0, &r)) {
-               pnpacpi_add_irqresource(dev, &r);
-               for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++)
-                       pnpacpi_add_irqresource(dev, &r);
+       r->flags = 0;
+       if (acpi_dev_resource_interrupt(res, 0, r)) {
+               pnpacpi_add_irqresource(dev, r);
+               for (i = 1; acpi_dev_resource_interrupt(res, i, r); i++)
+                       pnpacpi_add_irqresource(dev, r);
 
                if (i > 1) {
                        /*
@@ -209,7 +210,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                        }
                }
                return AE_OK;
-       } else if (r.flags & IORESOURCE_DISABLED) {
+       } else if (r->flags & IORESOURCE_DISABLED) {
                pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
                return AE_OK;
        }
@@ -218,13 +219,13 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
        case ACPI_RESOURCE_TYPE_MEMORY24:
        case ACPI_RESOURCE_TYPE_MEMORY32:
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
-               if (acpi_dev_resource_memory(res, &r))
-                       pnp_add_resource(dev, &r);
+               if (acpi_dev_resource_memory(res, r))
+                       pnp_add_resource(dev, r);
                break;
        case ACPI_RESOURCE_TYPE_IO:
        case ACPI_RESOURCE_TYPE_FIXED_IO:
-               if (acpi_dev_resource_io(res, &r))
-                       pnp_add_resource(dev, &r);
+               if (acpi_dev_resource_io(res, r))
+                       pnp_add_resource(dev, r);
                break;
        case ACPI_RESOURCE_TYPE_DMA:
                dma = &res->data.dma;
@@ -410,12 +411,12 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
        if (p->resource_type == ACPI_MEMORY_RANGE) {
                if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
                        flags = IORESOURCE_MEM_WRITEABLE;
-               pnp_register_mem_resource(dev, option_flags, p->minimum,
-                                         p->minimum, 0, p->address_length,
+               pnp_register_mem_resource(dev, option_flags, p->address.minimum,
+                                         p->address.minimum, 0, p->address.address_length,
                                          flags);
        } else if (p->resource_type == ACPI_IO_RANGE)
-               pnp_register_port_resource(dev, option_flags, p->minimum,
-                                          p->minimum, 0, p->address_length,
+               pnp_register_port_resource(dev, option_flags, p->address.minimum,
+                                          p->address.minimum, 0, p->address.address_length,
                                           IORESOURCE_IO_FIXED);
 }
 
@@ -429,12 +430,12 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
        if (p->resource_type == ACPI_MEMORY_RANGE) {
                if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
                        flags = IORESOURCE_MEM_WRITEABLE;
-               pnp_register_mem_resource(dev, option_flags, p->minimum,
-                                         p->minimum, 0, p->address_length,
+               pnp_register_mem_resource(dev, option_flags, p->address.minimum,
+                                         p->address.minimum, 0, p->address.address_length,
                                          flags);
        } else if (p->resource_type == ACPI_IO_RANGE)
-               pnp_register_port_resource(dev, option_flags, p->minimum,
-                                          p->minimum, 0, p->address_length,
+               pnp_register_port_resource(dev, option_flags, p->address.minimum,
+                                          p->address.minimum, 0, p->address.address_length,
                                           IORESOURCE_IO_FIXED);
 }
 
index 8bcfecd..eeca70d 100644 (file)
@@ -2430,7 +2430,7 @@ static int tsi721_probe(struct pci_dev *pdev,
        pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
                PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
                PCI_EXP_DEVCTL_NOSNOOP_EN,
-               0x2 << MAX_READ_REQUEST_SZ_SHIFT);
+               PCI_EXP_DEVCTL_READRQ_512B);
 
        /* Adjust PCIe completion timeout. */
        pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
index a7b4268..9d25025 100644 (file)
@@ -72,8 +72,6 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
-#define MAX_READ_REQUEST_SZ_SHIFT      12
-
 /*
  * Event Management Registers
  */
index 6776931..78ce4d6 100644 (file)
@@ -813,12 +813,13 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
                pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
                                     &devcontrol);
 
-               if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+               if ((devcontrol & PCI_EXP_DEVCTL_READRQ) >
+                    PCI_EXP_DEVCTL_READRQ_512B) {
                        esas2r_log(ESAS2R_LOG_INFO,
                                   "max read request size > 512B");
 
                        devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
-                       devcontrol |= 0x2000;
+                       devcontrol |= PCI_EXP_DEVCTL_READRQ_512B;
                        pci_write_config_word(a->pcid,
                                              pcie_cap_reg + PCI_EXP_DEVCTL,
                                              devcontrol);
index 1e824fb..296db7a 100644 (file)
@@ -161,7 +161,7 @@ static int sfi_verify_table(struct sfi_table_header *table)
  * Check for common case that we can re-use mapping to SYST,
  * which requires syst_pa, syst_va to be initialized.
  */
-struct sfi_table_header *sfi_map_table(u64 pa)
+static struct sfi_table_header *sfi_map_table(u64 pa)
 {
        struct sfi_table_header *th;
        u32 length;
@@ -189,7 +189,7 @@ struct sfi_table_header *sfi_map_table(u64 pa)
  * Undoes effect of sfi_map_table() by unmapping table
  * if it did not completely fit on same page as SYST.
  */
-void sfi_unmap_table(struct sfi_table_header *th)
+static void sfi_unmap_table(struct sfi_table_header *th)
 {
        if (!TABLE_ON_PAGE(syst_va, th, th->len))
                sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
index aeb50bb..eaffb02 100644 (file)
@@ -3452,8 +3452,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
        return status;
 }
 
-#ifdef CONFIG_PM
-
 int usb_remote_wakeup(struct usb_device *udev)
 {
        int     status = 0;
@@ -3512,16 +3510,6 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
        return connect_change;
 }
 
-#else
-
-static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
-               u16 portstatus, u16 portchange)
-{
-       return 0;
-}
-
-#endif
-
 static int check_ports_changed(struct usb_hub *hub)
 {
        int port1;
index 4953b65..cb9ee25 100644 (file)
@@ -3118,8 +3118,7 @@ int __init atafb_init(void)
                        printk("atafb_init: initializing Falcon hw\n");
                        fbhw = &falcon_switch;
                        atafb_ops.fb_setcolreg = &falcon_setcolreg;
-                       error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher,
-                                           IRQ_TYPE_PRIO,
+                       error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, 0,
                                            "framebuffer:modeswitch",
                                            falcon_vbl_switcher);
                        if (error)
index 3860d02..0b52d92 100644 (file)
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
 
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
-static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
 
 
 /* List of ballooned pages, threaded through the mem_map array. */
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
-               /*
-                * Ballooned out frames are effectively replaced with
-                * a scratch frame.  Ensure direct mappings and the
-                * p2m are consistent.
-                */
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        if (!PageHighMem(page)) {
-                               struct page *scratch_page = get_balloon_scratch_page();
-
                                ret = HYPERVISOR_update_va_mapping(
                                                (unsigned long)__va(pfn << PAGE_SHIFT),
-                                               pfn_pte(page_to_pfn(scratch_page),
-                                                       PAGE_KERNEL_RO), 0);
+                                               __pte_ma(0), 0);
                                BUG_ON(ret);
-
-                               put_balloon_scratch_page();
                        }
                        __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                }
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
        mutex_unlock(&balloon_mutex);
 }
 
-struct page *get_balloon_scratch_page(void)
-{
-       struct page *ret = get_cpu_var(balloon_scratch_page);
-       BUG_ON(ret == NULL);
-       return ret;
-}
-
-void put_balloon_scratch_page(void)
-{
-       put_cpu_var(balloon_scratch_page);
-}
-
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 void balloon_set_new_target(unsigned long target)
 {
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
        }
 }
 
-static int alloc_balloon_scratch_page(int cpu)
-{
-       if (per_cpu(balloon_scratch_page, cpu) != NULL)
-               return 0;
-
-       per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
-       if (per_cpu(balloon_scratch_page, cpu) == NULL) {
-               pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-
-static int balloon_cpu_notify(struct notifier_block *self,
-                                   unsigned long action, void *hcpu)
-{
-       int cpu = (long)hcpu;
-       switch (action) {
-       case CPU_UP_PREPARE:
-               if (alloc_balloon_scratch_page(cpu))
-                       return NOTIFY_BAD;
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block balloon_cpu_notifier = {
-       .notifier_call  = balloon_cpu_notify,
-};
-
 static int __init balloon_init(void)
 {
-       int i, cpu;
+       int i;
 
        if (!xen_domain())
                return -ENODEV;
 
-       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-               register_cpu_notifier(&balloon_cpu_notifier);
-
-               get_online_cpus();
-               for_each_online_cpu(cpu) {
-                       if (alloc_balloon_scratch_page(cpu)) {
-                               put_online_cpus();
-                               unregister_cpu_notifier(&balloon_cpu_notifier);
-                               return -ENOMEM;
-                       }
-               }
-               put_online_cpus();
-       }
-
        pr_info("Initialising balloon driver\n");
 
        balloon_stats.current_pages = xen_pv_domain()
@@ -696,15 +625,4 @@ static int __init balloon_init(void)
 
 subsys_initcall(balloon_init);
 
-static int __init balloon_clear(void)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               per_cpu(balloon_scratch_page, cpu) = NULL;
-
-       return 0;
-}
-early_initcall(balloon_clear);
-
 MODULE_LICENSE("GPL");
index 073b4a1..d5bb1a3 100644 (file)
@@ -67,7 +67,7 @@ struct gntdev_priv {
         * Only populated if populate_freeable_maps == 1 */
        struct list_head freeable_maps;
        /* lock protects maps and freeable_maps */
-       spinlock_t lock;
+       struct mutex lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
 };
@@ -91,7 +91,9 @@ struct grant_map {
        struct gnttab_map_grant_ref   *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref   *kmap_ops;
+       struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
+       unsigned long pages_vm_start;
 };
 
 static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
@@ -118,12 +120,13 @@ static void gntdev_free_map(struct grant_map *map)
                return;
 
        if (map->pages)
-               free_xenballooned_pages(map->count, map->pages);
+               gnttab_free_pages(map->count, map->pages);
        kfree(map->pages);
        kfree(map->grants);
        kfree(map->map_ops);
        kfree(map->unmap_ops);
        kfree(map->kmap_ops);
+       kfree(map->kunmap_ops);
        kfree(map);
 }
 
@@ -140,21 +143,24 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
        add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
        add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
        add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
+       add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
        add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants    ||
            NULL == add->map_ops   ||
            NULL == add->unmap_ops ||
            NULL == add->kmap_ops  ||
+           NULL == add->kunmap_ops ||
            NULL == add->pages)
                goto err;
 
-       if (alloc_xenballooned_pages(count, add->pages, false /* lowmem */))
+       if (gnttab_alloc_pages(count, add->pages))
                goto err;
 
        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
                add->kmap_ops[i].handle = -1;
+               add->kunmap_ops[i].handle = -1;
        }
 
        add->index = 0;
@@ -216,9 +222,9 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
        }
 
        if (populate_freeable_maps && priv) {
-               spin_lock(&priv->lock);
+               mutex_lock(&priv->lock);
                list_del(&map->next);
-               spin_unlock(&priv->lock);
+               mutex_unlock(&priv->lock);
        }
 
        if (map->pages && !use_ptemod)
@@ -239,6 +245,14 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 
+       /*
+        * Set the PTE as special to force get_user_pages_fast() fall
+        * back to the slow path.  If this is not supported as part of
+        * the grant map, it will be done afterwards.
+        */
+       if (xen_feature(XENFEAT_gnttab_map_avail_bits))
+               flags |= (1 << _GNTMAP_guest_avail0);
+
        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
@@ -247,6 +261,15 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
        return 0;
 }
 
+#ifdef CONFIG_X86
+static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
+                                    unsigned long addr, void *data)
+{
+       set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
+       return 0;
+}
+#endif
+
 static int map_grant_pages(struct grant_map *map)
 {
        int i, err = 0;
@@ -280,6 +303,8 @@ static int map_grant_pages(struct grant_map *map)
                                map->flags | GNTMAP_host_map,
                                map->grants[i].ref,
                                map->grants[i].domid);
+                       gnttab_set_unmap_op(&map->kunmap_ops[i], address,
+                               map->flags | GNTMAP_host_map, -1);
                }
        }
 
@@ -290,20 +315,42 @@ static int map_grant_pages(struct grant_map *map)
                return err;
 
        for (i = 0; i < map->count; i++) {
-               if (map->map_ops[i].status)
+               if (map->map_ops[i].status) {
                        err = -EINVAL;
-               else {
-                       BUG_ON(map->map_ops[i].handle == -1);
-                       map->unmap_ops[i].handle = map->map_ops[i].handle;
-                       pr_debug("map handle=%d\n", map->map_ops[i].handle);
+                       continue;
                }
+
+               map->unmap_ops[i].handle = map->map_ops[i].handle;
+               if (use_ptemod)
+                       map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
        }
        return err;
 }
 
+struct unmap_grant_pages_callback_data
+{
+       struct completion completion;
+       int result;
+};
+
+static void unmap_grant_callback(int result,
+                                struct gntab_unmap_queue_data *data)
+{
+       struct unmap_grant_pages_callback_data* d = data->data;
+
+       d->result = result;
+       complete(&d->completion);
+}
+
 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 {
        int i, err = 0;
+       struct gntab_unmap_queue_data unmap_data;
+       struct unmap_grant_pages_callback_data data;
+
+       init_completion(&data.completion);
+       unmap_data.data = &data;
+       unmap_data.done= &unmap_grant_callback;
 
        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -315,11 +362,16 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
                }
        }
 
-       err = gnttab_unmap_refs(map->unmap_ops + offset,
-                       use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
-                       pages);
-       if (err)
-               return err;
+       unmap_data.unmap_ops = map->unmap_ops + offset;
+       unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+       unmap_data.pages = map->pages + offset;
+       unmap_data.count = pages;
+
+       gnttab_unmap_refs_async(&unmap_data);
+
+       wait_for_completion(&data.completion);
+       if (data.result)
+               return data.result;
 
        for (i = 0; i < pages; i++) {
                if (map->unmap_ops[offset+i].status)
@@ -387,17 +439,26 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
                 * not do any unmapping, since that has been done prior to
                 * closing the vma, but it may still iterate the unmap_ops list.
                 */
-               spin_lock(&priv->lock);
+               mutex_lock(&priv->lock);
                map->vma = NULL;
-               spin_unlock(&priv->lock);
+               mutex_unlock(&priv->lock);
        }
        vma->vm_private_data = NULL;
        gntdev_put_map(priv, map);
 }
 
+static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+                                                unsigned long addr)
+{
+       struct grant_map *map = vma->vm_private_data;
+
+       return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
+}
+
 static struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
+       .find_special_page = gntdev_vma_find_special_page,
 };
 
 /* ------------------------------------------------------------------ */
@@ -433,14 +494,14 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                unmap_if_in_range(map, start, end);
        }
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 }
 
 static void mn_invl_page(struct mmu_notifier *mn,
@@ -457,7 +518,7 @@ static void mn_release(struct mmu_notifier *mn,
        struct grant_map *map;
        int err;
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
@@ -476,7 +537,7 @@ static void mn_release(struct mmu_notifier *mn,
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 }
 
 static struct mmu_notifier_ops gntdev_mmu_ops = {
@@ -498,7 +559,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 
        INIT_LIST_HEAD(&priv->maps);
        INIT_LIST_HEAD(&priv->freeable_maps);
-       spin_lock_init(&priv->lock);
+       mutex_init(&priv->lock);
 
        if (use_ptemod) {
                priv->mm = get_task_mm(current);
@@ -572,10 +633,10 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                return -EFAULT;
        }
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 
        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
@@ -594,7 +655,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                return -EFAULT;
        pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
@@ -602,7 +663,7 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                        list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
        if (map)
                gntdev_put_map(priv, map);
        return err;
@@ -670,7 +731,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
        out_flags = op.action;
        out_event = op.event_channel_port;
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
 
        list_for_each_entry(map, &priv->maps, next) {
                uint64_t begin = map->index << PAGE_SHIFT;
@@ -698,7 +759,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
        rc = 0;
 
  unlock_out:
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 
        /* Drop the reference to the event channel we did not save in the map */
        if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
@@ -748,7 +809,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
        pr_debug("map %d+%d at %lx (pgoff %lx)\n",
                        index, count, vma->vm_start, vma->vm_pgoff);
 
-       spin_lock(&priv->lock);
+       mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
@@ -783,7 +844,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                        map->flags |= GNTMAP_readonly;
        }
 
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 
        if (use_ptemod) {
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
@@ -806,16 +867,34 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                        if (err)
                                goto out_put_map;
                }
+       } else {
+#ifdef CONFIG_X86
+               /*
+                * If the PTEs were not made special by the grant map
+                * hypercall, do so here.
+                *
+                * This is racy since the mapping is already visible
+                * to userspace but userspace should be well-behaved
+                * enough to not touch it until the mmap() call
+                * returns.
+                */
+               if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
+                       apply_to_page_range(vma->vm_mm, vma->vm_start,
+                                           vma->vm_end - vma->vm_start,
+                                           set_grant_ptes_as_special, NULL);
+               }
+#endif
+               map->pages_vm_start = vma->vm_start;
        }
 
        return 0;
 
 unlock_out:
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
        return err;
 
 out_unlock_put:
-       spin_unlock(&priv->lock);
+       mutex_unlock(&priv->lock);
 out_put_map:
        if (use_ptemod)
                map->vma = NULL;
index 7786291..17972fb 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -50,6 +51,7 @@
 #include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 #include <xen/swiotlb-xen.h>
+#include <xen/balloon.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 
+/**
+ * gnttab_alloc_pages - alloc pages suitable for grant mapping into
+ * @nr_pages: number of pages to alloc
+ * @pages: returns the pages
+ */
+int gnttab_alloc_pages(int nr_pages, struct page **pages)
+{
+       int i;
+       int ret;
+
+       ret = alloc_xenballooned_pages(nr_pages, pages, false);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < nr_pages; i++) {
+#if BITS_PER_LONG < 64
+               struct xen_page_foreign *foreign;
+
+               foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
+               if (!foreign) {
+                       gnttab_free_pages(nr_pages, pages);
+                       return -ENOMEM;
+               }
+               set_page_private(pages[i], (unsigned long)foreign);
+#endif
+               SetPagePrivate(pages[i]);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(gnttab_alloc_pages);
+
+/**
+ * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
+ * @nr_pages: number of pages to free
+ * @pages: the pages
+ */
+void gnttab_free_pages(int nr_pages, struct page **pages)
+{
+       int i;
+
+       for (i = 0; i < nr_pages; i++) {
+               if (PagePrivate(pages[i])) {
+#if BITS_PER_LONG < 64
+                       kfree((void *)page_private(pages[i]));
+#endif
+                       ClearPagePrivate(pages[i]);
+               }
+       }
+       free_xenballooned_pages(nr_pages, pages);
+}
+EXPORT_SYMBOL(gnttab_free_pages);
+
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
 static inline void
@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
        if (ret)
                return ret;
 
-       /* Retry eagain maps */
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
+               /* Retry eagain maps */
                if (map_ops[i].status == GNTST_eagain)
                        gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
                                                &map_ops[i].status, __func__);
 
+               if (map_ops[i].status == GNTST_okay) {
+                       struct xen_page_foreign *foreign;
+
+                       SetPageForeign(pages[i]);
+                       foreign = xen_page_foreign(pages[i]);
+                       foreign->domid = map_ops[i].dom;
+                       foreign->gref = map_ops[i].ref;
+               }
+       }
+
        return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
 }
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                     struct gnttab_map_grant_ref *kmap_ops,
+                     struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count)
 {
+       unsigned int i;
        int ret;
 
        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
                return ret;
 
-       return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
+       for (i = 0; i < count; i++)
+               ClearPageForeign(pages[i]);
+
+       return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+#define GNTTAB_UNMAP_REFS_DELAY 5
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
+static void gnttab_unmap_work(struct work_struct *work)
+{
+       struct gntab_unmap_queue_data
+               *unmap_data = container_of(work,
+                                          struct gntab_unmap_queue_data,
+                                          gnttab_work.work);
+       if (unmap_data->age != UINT_MAX)
+               unmap_data->age++;
+       __gnttab_unmap_refs_async(unmap_data);
+}
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+       int ret;
+       int pc;
+
+       for (pc = 0; pc < item->count; pc++) {
+               if (page_count(item->pages[pc]) > 1) {
+                       unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
+                       schedule_delayed_work(&item->gnttab_work,
+                                             msecs_to_jiffies(delay));
+                       return;
+               }
+       }
+
+       ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
+                               item->pages, item->count);
+       item->done(ret, item);
+}
+
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+       INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
+       item->age = 0;
+
+       __gnttab_unmap_refs_async(item);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
 {
        int rc;
index f8bb36f..bf19407 100644 (file)
@@ -105,10 +105,16 @@ static void do_suspend(void)
 
        err = freeze_processes();
        if (err) {
-               pr_err("%s: freeze failed %d\n", __func__, err);
+               pr_err("%s: freeze processes failed %d\n", __func__, err);
                goto out;
        }
 
+       err = freeze_kernel_threads();
+       if (err) {
+               pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
+               goto out_thaw;
+       }
+
        err = dpm_suspend_start(PMSG_FREEZE);
        if (err) {
                pr_err("%s: dpm_suspend_start %d\n", __func__, err);
index 83b5c53..8a65423 100644 (file)
@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = {
 };
 #endif
 
-static int xen_tmem_init(void)
+static int __init xen_tmem_init(void)
 {
        if (!xen_domain())
                return 0;
index 34e40b7..4fc886c 100644 (file)
@@ -117,8 +117,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
        list_for_each_entry(info, &mem_device->res_list, list) {
                if ((info->caching == address64.info.mem.caching) &&
                    (info->write_protect == address64.info.mem.write_protect) &&
-                   (info->start_addr + info->length == address64.minimum)) {
-                       info->length += address64.address_length;
+                   (info->start_addr + info->length == address64.address.minimum)) {
+                       info->length += address64.address.address_length;
                        return AE_OK;
                }
        }
@@ -130,8 +130,8 @@ acpi_memory_get_resource(struct acpi_resource *resource, void *context)
        INIT_LIST_HEAD(&new->list);
        new->caching = address64.info.mem.caching;
        new->write_protect = address64.info.mem.write_protect;
-       new->start_addr = address64.minimum;
-       new->length = address64.address_length;
+       new->start_addr = address64.address.minimum;
+       new->length = address64.address.address_length;
        list_add_tail(&new->list, &mem_device->res_list);
 
        return AE_OK;
index e999496..ecd540a 100644 (file)
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
                return;
        if (i > scsiback_max_buffer_pages) {
                n = min(num, i - scsiback_max_buffer_pages);
-               free_xenballooned_pages(n, page + num - n);
+               gnttab_free_pages(n, page + num - n);
                n = num - n;
        }
        spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
        spin_lock_irqsave(&free_pages_lock, flags);
        if (list_empty(&scsiback_free_pages)) {
                spin_unlock_irqrestore(&free_pages_lock, flags);
-               return alloc_xenballooned_pages(1, page, false);
+               return gnttab_alloc_pages(1, page);
        }
        page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
        list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
        while (free_pages_num) {
                if (get_free_page(&page))
                        BUG();
-               free_xenballooned_pages(1, &page);
+               gnttab_free_pages(1, &page);
        }
        scsiback_deregister_configfs();
        xenbus_unregister_driver(&scsiback_driver);
index 85534ea..9433e46 100644 (file)
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type,
        }
 
        if (msg_type == XS_TRANSACTION_START) {
-               trans->handle.id = simple_strtoul(reply, NULL, 0);
-
-               list_add(&trans->list, &u->transactions);
-       } else if (msg_type == XS_TRANSACTION_END) {
+               if (u->u.msg.type == XS_ERROR)
+                       kfree(trans);
+               else {
+                       trans->handle.id = simple_strtoul(reply, NULL, 0);
+                       list_add(&trans->list, &u->transactions);
+               }
+       } else if (u->u.msg.type == XS_TRANSACTION_END) {
                list_for_each_entry(trans, &u->transactions, list)
                        if (trans->handle.id == u->u.msg.tx_id)
                                break;
index c35c5c6..06ea5cd 100644 (file)
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
        return err;
 }
 
-/**
- * Must be called with lock_flocks() already held. Fills in the passed
- * counter variables, so you can prepare pagelist metadata before calling
- * ceph_encode_locks.
+/*
+ * Fills in the passed counter variables, so you can prepare pagelist metadata
+ * before calling ceph_encode_locks.
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
-       struct file_lock *lock;
+       struct file_lock_context *ctx;
 
        *fcntl_count = 0;
        *flock_count = 0;
 
-       for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-               if (lock->fl_flags & FL_POSIX)
-                       ++(*fcntl_count);
-               else if (lock->fl_flags & FL_FLOCK)
-                       ++(*flock_count);
+       ctx = inode->i_flctx;
+       if (ctx) {
+               *fcntl_count = ctx->flc_posix_cnt;
+               *flock_count = ctx->flc_flock_cnt;
        }
        dout("counted %d flock locks and %d fcntl locks",
             *flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                                int num_fcntl_locks, int num_flock_locks)
 {
        struct file_lock *lock;
+       struct file_lock_context *ctx = inode->i_flctx;
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
        dout("encoding %d flock and %d fcntl locks", num_flock_locks,
             num_fcntl_locks);
 
-       for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-               if (lock->fl_flags & FL_POSIX) {
-                       ++seen_fcntl;
-                       if (seen_fcntl > num_fcntl_locks) {
-                               err = -ENOSPC;
-                               goto fail;
-                       }
-                       err = lock_to_ceph_filelock(lock, &flocks[l]);
-                       if (err)
-                               goto fail;
-                       ++l;
+       if (!ctx)
+               return 0;
+
+       spin_lock(&ctx->flc_lock);
+       list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
+               ++seen_fcntl;
+               if (seen_fcntl > num_fcntl_locks) {
+                       err = -ENOSPC;
+                       goto fail;
                }
+               err = lock_to_ceph_filelock(lock, &flocks[l]);
+               if (err)
+                       goto fail;
+               ++l;
        }
-       for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-               if (lock->fl_flags & FL_FLOCK) {
-                       ++seen_flock;
-                       if (seen_flock > num_flock_locks) {
-                               err = -ENOSPC;
-                               goto fail;
-                       }
-                       err = lock_to_ceph_filelock(lock, &flocks[l]);
-                       if (err)
-                               goto fail;
-                       ++l;
+       list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+               ++seen_flock;
+               if (seen_flock > num_flock_locks) {
+                       err = -ENOSPC;
+                       goto fail;
                }
+               err = lock_to_ceph_filelock(lock, &flocks[l]);
+               if (err)
+                       goto fail;
+               ++l;
        }
 fail:
+       spin_unlock(&ctx->flc_lock);
        return err;
 }
 
index d2171f4..5f62fb7 100644 (file)
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                struct ceph_filelock *flocks;
 
 encode_again:
-               spin_lock(&inode->i_lock);
                ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
-               spin_unlock(&inode->i_lock);
                flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
                                 sizeof(struct ceph_filelock), GFP_NOFS);
                if (!flocks) {
                        err = -ENOMEM;
                        goto out_free;
                }
-               spin_lock(&inode->i_lock);
                err = ceph_encode_locks_to_buffer(inode, flocks,
                                                  num_fcntl_locks,
                                                  num_flock_locks);
-               spin_unlock(&inode->i_lock);
                if (err) {
                        kfree(flocks);
                        if (err == -ENOSPC)
index 294ff30..8fe1f7a 100644 (file)
@@ -1113,11 +1113,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        return rc;
 }
 
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
-       for (lockp = &inode->i_flock; *lockp != NULL; \
-            lockp = &(*lockp)->fl_next)
-
 struct lock_to_push {
        struct list_head llist;
        __u64 offset;
@@ -1132,8 +1127,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
        struct inode *inode = cfile->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-       struct file_lock *flock, **before;
-       unsigned int count = 0, i = 0;
+       struct file_lock *flock;
+       struct file_lock_context *flctx = inode->i_flctx;
+       unsigned int i;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
@@ -1141,21 +1137,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 
        xid = get_xid();
 
-       spin_lock(&inode->i_lock);
-       cifs_for_each_lock(inode, before) {
-               if ((*before)->fl_flags & FL_POSIX)
-                       count++;
-       }
-       spin_unlock(&inode->i_lock);
+       if (!flctx)
+               goto out;
 
        INIT_LIST_HEAD(&locks_to_send);
 
        /*
-        * Allocating count locks is enough because no FL_POSIX locks can be
-        * added to the list while we are holding cinode->lock_sem that
+        * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
+        * can be added to the list while we are holding cinode->lock_sem that
         * protects locking operations of this inode.
         */
-       for (; i < count; i++) {
+       for (i = 0; i < flctx->flc_posix_cnt; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
@@ -1165,11 +1157,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
        }
 
        el = locks_to_send.next;
-       spin_lock(&inode->i_lock);
-       cifs_for_each_lock(inode, before) {
-               flock = *before;
-               if ((flock->fl_flags & FL_POSIX) == 0)
-                       continue;
+       spin_lock(&flctx->flc_lock);
+       list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
@@ -1189,9 +1178,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
-               el = el->next;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&flctx->flc_lock);
 
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                int stored_rc;
index 9b4e7d7..d4dbf3c 100644 (file)
@@ -466,6 +466,8 @@ static void ext3_put_super (struct super_block * sb)
        }
        sb->s_fs_info = NULL;
        kfree(sbi->s_blockgroup_lock);
+       mutex_destroy(&sbi->s_orphan_lock);
+       mutex_destroy(&sbi->s_resize_lock);
        kfree(sbi);
 }
 
index 74c5f53..ac64edb 100644 (file)
@@ -1046,10 +1046,7 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                         struct path *path);
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
-                                int format_id);
 static int ext4_quota_off(struct super_block *sb, int type);
-static int ext4_quota_off_sysfile(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
@@ -1084,16 +1081,6 @@ static const struct quotactl_ops ext4_qctl_operations = {
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
-
-static const struct quotactl_ops ext4_qctl_sysfile_operations = {
-       .quota_on_meta  = ext4_quota_on_sysfile,
-       .quota_off      = ext4_quota_off_sysfile,
-       .quota_sync     = dquot_quota_sync,
-       .get_info       = dquot_get_dqinfo,
-       .set_info       = dquot_set_dqinfo,
-       .get_dqblk      = dquot_get_dqblk,
-       .set_dqblk      = dquot_set_dqblk
-};
 #endif
 
 static const struct super_operations ext4_sops = {
@@ -3935,7 +3922,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_QUOTA
        sb->dq_op = &ext4_quota_operations;
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-               sb->s_qcop = &ext4_qctl_sysfile_operations;
+               sb->s_qcop = &dquot_quotactl_sysfile_ops;
        else
                sb->s_qcop = &ext4_qctl_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
@@ -5288,21 +5275,6 @@ static int ext4_enable_quotas(struct super_block *sb)
        return 0;
 }
 
-/*
- * quota_on function that is used when QUOTA feature is set.
- */
-static int ext4_quota_on_sysfile(struct super_block *sb, int type,
-                                int format_id)
-{
-       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-               return -EINVAL;
-
-       /*
-        * USAGE was enabled at mount time. Only need to enable LIMITS now.
-        */
-       return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
-}
-
 static int ext4_quota_off(struct super_block *sb, int type)
 {
        struct inode *inode = sb_dqopt(sb)->files[type];
@@ -5329,18 +5301,6 @@ out:
        return dquot_quota_off(sb, type);
 }
 
-/*
- * quota_off function that is used when QUOTA feature is set.
- */
-static int ext4_quota_off_sysfile(struct super_block *sb, int type)
-{
-       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
-               return -EINVAL;
-
-       /* Disable only the limits. */
-       return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
  * itself serializes the operations (and no one else should touch the files)
index 3088e2a..7b31430 100644 (file)
@@ -73,7 +73,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 
        BUG_ON(name == NULL);
 
-       if (acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
+       if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
                return -E2BIG;
 
        if (type == ACL_TYPE_ACCESS) {
index c5a34f0..6371192 100644 (file)
@@ -1896,7 +1896,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 
        ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
        if (ht == NULL)
-               ht = vzalloc(size);
+               ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
+                              PAGE_KERNEL);
        if (!ht)
                return -ENOMEM;
 
index a23524a..aeb7bc9 100644 (file)
@@ -173,19 +173,14 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
        spin_unlock(&lru_lock);
 }
 
-static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 {
+       spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
-}
-
-static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
-{
-       spin_lock(&lru_lock);
-       __gfs2_glock_remove_from_lru(gl);
        spin_unlock(&lru_lock);
 }
 
@@ -205,9 +200,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 
        lockref_mark_dead(&gl->gl_lockref);
 
-       spin_lock(&lru_lock);
-       __gfs2_glock_remove_from_lru(gl);
-       spin_unlock(&lru_lock);
+       gfs2_glock_remove_from_lru(gl);
        spin_unlock(&gl->gl_lockref.lock);
        spin_lock_bucket(gl->gl_hash);
        hlist_bl_del_rcu(&gl->gl_list);
index 9054002..73c7225 100644 (file)
@@ -543,10 +543,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
        }
 
        error = gfs2_dir_add(&dip->i_inode, name, ip, da);
-       if (error)
-               goto fail_end_trans;
 
-fail_end_trans:
        gfs2_trans_end(sdp);
 fail_ipreserv:
        gfs2_inplace_release(dip);
index 573bd3b..1b64577 100644 (file)
@@ -439,7 +439,7 @@ static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
 
         ls->ls_recover_jid_done = jid;
         ls->ls_recover_jid_status = message;
-       sprintf(env_jid, "JID=%d", jid);
+       sprintf(env_jid, "JID=%u", jid);
        sprintf(env_status, "RECOVERY=%s",
                message == LM_RD_SUCCESS ? "Done" : "Failed");
         kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
index 3ab566b..ae8e881 100644 (file)
@@ -96,7 +96,7 @@ static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
        struct super_block *sb = sdp->sd_vfs;
        int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", frozen);
+       return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
 }
 
 static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
index c760fac..3a53b1d 100644 (file)
@@ -194,7 +194,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 #ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
 #endif
-
+       inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);
 
        return 0;
@@ -237,6 +237,7 @@ void __destroy_inode(struct inode *inode)
        BUG_ON(inode_has_buffers(inode));
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
+       locks_free_lock_context(inode->i_flctx);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
index 01e1ee7..005a15c 100644 (file)
@@ -2,6 +2,7 @@
  *  linux/fs/isofs/util.c
  */
 
+#include <linux/time.h>
 #include "isofs.h"
 
 /* 
@@ -17,9 +18,9 @@
 int iso_date(char * p, int flag)
 {
        int year, month, day, hour, minute, second, tz;
-       int crtime, days, i;
+       int crtime;
 
-       year = p[0] - 70;
+       year = p[0];
        month = p[1];
        day = p[2];
        hour = p[3];
@@ -31,18 +32,7 @@ int iso_date(char * p, int flag)
        if (year < 0) {
                crtime = 0;
        } else {
-               int monlen[12] = {31,28,31,30,31,30,31,31,30,31,30,31};
-
-               days = year * 365;
-               if (year > 2)
-                       days += (year+1) / 4;
-               for (i = 1; i < month; i++)
-                       days += monlen[i-1];
-               if (((year+2) % 4) == 0 && month > 2)
-                       days++;
-               days += day - 1;
-               crtime = ((((days * 24) + hour) * 60 + minute) * 60)
-                       + second;
+               crtime = mktime64(year+1900, month, day, hour, minute, second);
 
                /* sign extend */
                if (tz & 0x80)
index d12ff4e..665ef5a 100644 (file)
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 {
        struct inode     *inode = nlmsvc_file_inode(file);
        struct file_lock *fl;
+       struct file_lock_context *flctx = inode->i_flctx;
        struct nlm_host  *lockhost;
 
+       if (!flctx || list_empty_careful(&flctx->flc_posix))
+               return 0;
 again:
        file->f_locks = 0;
-       spin_lock(&inode->i_lock);
-       for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+       spin_lock(&flctx->flc_lock);
+       list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
                if (fl->fl_lmops != &nlmsvc_lock_operations)
                        continue;
 
@@ -180,7 +183,7 @@ again:
                if (match(lockhost, host)) {
                        struct file_lock lock = *fl;
 
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&flctx->flc_lock);
                        lock.fl_type  = F_UNLCK;
                        lock.fl_start = 0;
                        lock.fl_end   = OFFSET_MAX;
@@ -192,7 +195,7 @@ again:
                        goto again;
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&flctx->flc_lock);
 
        return 0;
 }
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
 {
        struct inode     *inode = nlmsvc_file_inode(file);
        struct file_lock *fl;
+       struct file_lock_context *flctx = inode->i_flctx;
 
        if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                return 1;
 
-       spin_lock(&inode->i_lock);
-       for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-               if (fl->fl_lmops == &nlmsvc_lock_operations) {
-                       spin_unlock(&inode->i_lock);
-                       return 1;
+       if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+               spin_lock(&flctx->flc_lock);
+               list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+                       if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                               spin_unlock(&flctx->flc_lock);
+                               return 1;
+                       }
                }
+               spin_unlock(&flctx->flc_lock);
        }
-       spin_unlock(&inode->i_lock);
        file->f_locks = 0;
        return 0;
 }
index 59e2f90..4d0d411 100644 (file)
@@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl)
 int leases_enable = 1;
 int lease_break_time = 45;
 
-#define for_each_lock(inode, lockp) \
-       for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
-
 /*
  * The global file_lock_list is only used for displaying /proc/locks, so we
  * keep a list on each CPU, with each list protected by its own spinlock via
  * the file_lock_lglock. Note that alterations to the list also require that
- * the relevant i_lock is held.
+ * the relevant flc_lock is held.
  */
 DEFINE_STATIC_LGLOCK(file_lock_lglock);
 static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  * contrast to those that are acting as records of acquired locks).
  *
  * Note that when we acquire this lock in order to change the above fields,
- * we often hold the i_lock as well. In certain cases, when reading the fields
+ * we often hold the flc_lock as well. In certain cases, when reading the fields
  * protected by this lock, we can skip acquiring it iff we already hold the
- * i_lock.
+ * flc_lock.
  *
  * In particular, adding an entry to the fl_block list requires that you hold
- * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
- * an entry from the list however only requires the file_lock_lock.
+ * both the flc_lock and the blocked_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lock.
  */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
+static struct kmem_cache *flctx_cache __read_mostly;
 static struct kmem_cache *filelock_cache __read_mostly;
 
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+       struct file_lock_context *new;
+
+       if (likely(inode->i_flctx))
+               goto out;
+
+       new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+       if (!new)
+               goto out;
+
+       spin_lock_init(&new->flc_lock);
+       INIT_LIST_HEAD(&new->flc_flock);
+       INIT_LIST_HEAD(&new->flc_posix);
+       INIT_LIST_HEAD(&new->flc_lease);
+
+       /*
+        * Assign the pointer if it's not already assigned. If it is, then
+        * free the context we just allocated.
+        */
+       spin_lock(&inode->i_lock);
+       if (likely(!inode->i_flctx)) {
+               inode->i_flctx = new;
+               new = NULL;
+       }
+       spin_unlock(&inode->i_lock);
+
+       if (new)
+               kmem_cache_free(flctx_cache, new);
+out:
+       return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+       if (ctx) {
+               WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+               WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
+               WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
+               kmem_cache_free(flctx_cache, ctx);
+       }
+}
+
 static void locks_init_lock_heads(struct file_lock *fl)
 {
        INIT_HLIST_NODE(&fl->fl_link);
+       INIT_LIST_HEAD(&fl->fl_list);
        INIT_LIST_HEAD(&fl->fl_block);
        init_waitqueue_head(&fl->fl_wait);
 }
@@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private);
 void locks_free_lock(struct file_lock *fl)
 {
        BUG_ON(waitqueue_active(&fl->fl_wait));
+       BUG_ON(!list_empty(&fl->fl_list));
        BUG_ON(!list_empty(&fl->fl_block));
        BUG_ON(!hlist_unhashed(&fl->fl_link));
 
@@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose)
        struct file_lock *fl;
 
        while (!list_empty(dispose)) {
-               fl = list_first_entry(dispose, struct file_lock, fl_block);
-               list_del_init(&fl->fl_block);
+               fl = list_first_entry(dispose, struct file_lock, fl_list);
+               list_del_init(&fl->fl_list);
                locks_free_lock(fl);
        }
 }
@@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
        return fl1->fl_owner == fl2->fl_owner;
 }
 
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
 static void locks_insert_global_locks(struct file_lock *fl)
 {
        lg_local_lock(&file_lock_lglock);
@@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
        lg_local_unlock(&file_lock_lglock);
 }
 
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
 static void locks_delete_global_locks(struct file_lock *fl)
 {
        /*
         * Avoid taking lock if already unhashed. This is safe since this check
-        * is done while holding the i_lock, and new insertions into the list
+        * is done while holding the flc_lock, and new insertions into the list
         * also require that it be held.
         */
        if (hlist_unhashed(&fl->fl_link))
@@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
  * the order they blocked. The documentation doesn't require this but
  * it seems like the reasonable thing to do.
  *
- * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
- * list itself is protected by the blocked_lock_lock, but by ensuring that the
- * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
- * in some cases when we see that the fl_block list is empty.
+ * Must be called with both the flc_lock and blocked_lock_lock held. The
+ * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * that the flc_lock is also held on insertions we can avoid taking the
+ * blocked_lock_lock in some cases when we see that the fl_block list is empty.
  */
 static void __locks_insert_block(struct file_lock *blocker,
                                        struct file_lock *waiter)
@@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
                locks_insert_global_blocked(waiter);
 }
 
-/* Must be called with i_lock held. */
+/* Must be called with flc_lock held. */
 static void locks_insert_block(struct file_lock *blocker,
                                        struct file_lock *waiter)
 {
@@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
 /*
  * Wake up processes blocked waiting for blocker.
  *
- * Must be called with the inode->i_lock held!
+ * Must be called with the inode->flc_lock held!
  */
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
        /*
         * Avoid taking global lock if list is empty. This is safe since new
-        * blocked requests are only added to the list under the i_lock, and
-        * the i_lock is always held here. Note that removal from the fl_block
-        * list does not require the i_lock, so we must recheck list_empty()
+        * blocked requests are only added to the list under the flc_lock, and
+        * the flc_lock is always held here. Note that removal from the fl_block
+        * list does not require the flc_lock, so we must recheck list_empty()
         * after acquiring the blocked_lock_lock.
         */
        if (list_empty(&blocker->fl_block))
@@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
        spin_unlock(&blocked_lock_lock);
 }
 
-/* Insert file lock fl into an inode's lock list at the position indicated
- * by pos. At the same time add the lock to the global file lock list.
- *
- * Must be called with the i_lock held!
- */
-static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+static void
+locks_insert_lock_ctx(struct file_lock *fl, int *counter,
+                     struct list_head *before)
 {
        fl->fl_nspid = get_pid(task_tgid(current));
-
-       /* insert into file's list */
-       fl->fl_next = *pos;
-       *pos = fl;
-
+       list_add_tail(&fl->fl_list, before);
+       ++*counter;
        locks_insert_global_locks(fl);
 }
 
-/**
- * locks_delete_lock - Delete a lock and then free it.
- * @thisfl_p: pointer that points to the fl_next field of the previous
- *           inode->i_flock list entry
- *
- * Unlink a lock from all lists and free the namespace reference, but don't
- * free it yet. Wake up processes that are blocked waiting for this lock and
- * notify the FS that the lock has been cleared.
- *
- * Must be called with the i_lock held!
- */
-static void locks_unlink_lock(struct file_lock **thisfl_p)
+static void
+locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
 {
-       struct file_lock *fl = *thisfl_p;
-
        locks_delete_global_locks(fl);
-
-       *thisfl_p = fl->fl_next;
-       fl->fl_next = NULL;
-
+       list_del_init(&fl->fl_list);
+       --*counter;
        if (fl->fl_nspid) {
                put_pid(fl->fl_nspid);
                fl->fl_nspid = NULL;
        }
-
        locks_wake_up_blocks(fl);
 }
 
-/*
- * Unlink a lock from all lists and free it.
- *
- * Must be called with i_lock held!
- */
-static void locks_delete_lock(struct file_lock **thisfl_p,
-                             struct list_head *dispose)
+static void
+locks_delete_lock_ctx(struct file_lock *fl, int *counter,
+                     struct list_head *dispose)
 {
-       struct file_lock *fl = *thisfl_p;
-
-       locks_unlink_lock(thisfl_p);
+       locks_unlink_lock_ctx(fl, counter);
        if (dispose)
-               list_add(&fl->fl_block, dispose);
+               list_add(&fl->fl_list, dispose);
        else
                locks_free_lock(fl);
 }
@@ -746,22 +764,27 @@ void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
        struct file_lock *cfl;
+       struct file_lock_context *ctx;
        struct inode *inode = file_inode(filp);
 
-       spin_lock(&inode->i_lock);
-       for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
-               if (!IS_POSIX(cfl))
-                       continue;
-               if (posix_locks_conflict(fl, cfl))
-                       break;
-       }
-       if (cfl) {
-               locks_copy_conflock(fl, cfl);
-               if (cfl->fl_nspid)
-                       fl->fl_pid = pid_vnr(cfl->fl_nspid);
-       } else
+       ctx = inode->i_flctx;
+       if (!ctx || list_empty_careful(&ctx->flc_posix)) {
                fl->fl_type = F_UNLCK;
-       spin_unlock(&inode->i_lock);
+               return;
+       }
+
+       spin_lock(&ctx->flc_lock);
+       list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
+               if (posix_locks_conflict(fl, cfl)) {
+                       locks_copy_conflock(fl, cfl);
+                       if (cfl->fl_nspid)
+                               fl->fl_pid = pid_vnr(cfl->fl_nspid);
+                       goto out;
+               }
+       }
+       fl->fl_type = F_UNLCK;
+out:
+       spin_unlock(&ctx->flc_lock);
        return;
 }
 EXPORT_SYMBOL(posix_test_lock);
@@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
 static int flock_lock_file(struct file *filp, struct file_lock *request)
 {
        struct file_lock *new_fl = NULL;
-       struct file_lock **before;
-       struct inode * inode = file_inode(filp);
+       struct file_lock *fl;
+       struct file_lock_context *ctx;
+       struct inode *inode = file_inode(filp);
        int error = 0;
-       int found = 0;
+       bool found = false;
        LIST_HEAD(dispose);
 
+       ctx = locks_get_lock_context(inode);
+       if (!ctx)
+               return -ENOMEM;
+
        if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
                new_fl = locks_alloc_lock();
                if (!new_fl)
                        return -ENOMEM;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ctx->flc_lock);
        if (request->fl_flags & FL_ACCESS)
                goto find_conflict;
 
-       for_each_lock(inode, before) {
-               struct file_lock *fl = *before;
-               if (IS_POSIX(fl))
-                       break;
-               if (IS_LEASE(fl))
-                       continue;
+       list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
                if (filp != fl->fl_file)
                        continue;
                if (request->fl_type == fl->fl_type)
                        goto out;
-               found = 1;
-               locks_delete_lock(before, &dispose);
+               found = true;
+               locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
                break;
        }
 
@@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
         * give it the opportunity to lock the file.
         */
        if (found) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ctx->flc_lock);
                cond_resched();
-               spin_lock(&inode->i_lock);
+               spin_lock(&ctx->flc_lock);
        }
 
 find_conflict:
-       for_each_lock(inode, before) {
-               struct file_lock *fl = *before;
-               if (IS_POSIX(fl))
-                       break;
-               if (IS_LEASE(fl))
-                       continue;
+       list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
                if (!flock_locks_conflict(request, fl))
                        continue;
                error = -EAGAIN;
@@ -911,12 +929,12 @@ find_conflict:
        if (request->fl_flags & FL_ACCESS)
                goto out;
        locks_copy_lock(new_fl, request);
-       locks_insert_lock(before, new_fl);
+       locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
        new_fl = NULL;
        error = 0;
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ctx->flc_lock);
        if (new_fl)
                locks_free_lock(new_fl);
        locks_dispose_list(&dispose);
@@ -925,16 +943,20 @@ out:
 
 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 {
-       struct file_lock *fl;
+       struct file_lock *fl, *tmp;
        struct file_lock *new_fl = NULL;
        struct file_lock *new_fl2 = NULL;
        struct file_lock *left = NULL;
        struct file_lock *right = NULL;
-       struct file_lock **before;
+       struct file_lock_context *ctx;
        int error;
        bool added = false;
        LIST_HEAD(dispose);
 
+       ctx = locks_get_lock_context(inode);
+       if (!ctx)
+               return -ENOMEM;
+
        /*
         * We may need two file_lock structures for this operation,
         * so we get them in advance to avoid races.
@@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                new_fl2 = locks_alloc_lock();
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ctx->flc_lock);
        /*
         * New lock request. Walk all POSIX locks and look for conflicts. If
         * there are any, either return error or put the request on the
         * blocker's list of waiters and the global blocked_hash.
         */
        if (request->fl_type != F_UNLCK) {
-               for_each_lock(inode, before) {
-                       fl = *before;
+               list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
                        if (!IS_POSIX(fl))
                                continue;
                        if (!posix_locks_conflict(request, fl))
@@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
        if (request->fl_flags & FL_ACCESS)
                goto out;
 
-       /*
-        * Find the first old lock with the same owner as the new lock.
-        */
-       
-       before = &inode->i_flock;
-
-       /* First skip locks owned by other processes.  */
-       while ((fl = *before) && (!IS_POSIX(fl) ||
-                                 !posix_same_owner(request, fl))) {
-               before = &fl->fl_next;
+       /* Find the first old lock with the same owner as the new lock */
+       list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
+               if (posix_same_owner(request, fl))
+                       break;
        }
 
        /* Process locks with this owner. */
-       while ((fl = *before) && posix_same_owner(request, fl)) {
-               /* Detect adjacent or overlapping regions (if same lock type)
-                */
+       list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
+               if (!posix_same_owner(request, fl))
+                       break;
+
+               /* Detect adjacent or overlapping regions (if same lock type) */
                if (request->fl_type == fl->fl_type) {
                        /* In all comparisons of start vs end, use
                         * "start - 1" rather than "end + 1". If end
                         * is OFFSET_MAX, end + 1 will become negative.
                         */
                        if (fl->fl_end < request->fl_start - 1)
-                               goto next_lock;
+                               continue;
                        /* If the next lock in the list has entirely bigger
                         * addresses than the new one, insert the lock here.
                         */
@@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        else
                                request->fl_end = fl->fl_end;
                        if (added) {
-                               locks_delete_lock(before, &dispose);
+                               locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
+                                                       &dispose);
                                continue;
                        }
                        request = fl;
                        added = true;
-               }
-               else {
+               } else {
                        /* Processing for different lock types is a bit
                         * more complex.
                         */
                        if (fl->fl_end < request->fl_start)
-                               goto next_lock;
+                               continue;
                        if (fl->fl_start > request->fl_end)
                                break;
                        if (request->fl_type == F_UNLCK)
@@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                                 * one (This may happen several times).
                                 */
                                if (added) {
-                                       locks_delete_lock(before, &dispose);
+                                       locks_delete_lock_ctx(fl,
+                                               &ctx->flc_posix_cnt, &dispose);
                                        continue;
                                }
                                /*
@@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                                locks_copy_lock(new_fl, request);
                                request = new_fl;
                                new_fl = NULL;
-                               locks_delete_lock(before, &dispose);
-                               locks_insert_lock(before, request);
+                               locks_insert_lock_ctx(request,
+                                       &ctx->flc_posix_cnt, &fl->fl_list);
+                               locks_delete_lock_ctx(fl,
+                                       &ctx->flc_posix_cnt, &dispose);
                                added = true;
                        }
                }
-               /* Go on to next lock.
-                */
-       next_lock:
-               before = &fl->fl_next;
        }
 
        /*
@@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        goto out;
                }
                locks_copy_lock(new_fl, request);
-               locks_insert_lock(before, new_fl);
+               locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
+                                       &fl->fl_list);
                new_fl = NULL;
        }
        if (right) {
@@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                        left = new_fl2;
                        new_fl2 = NULL;
                        locks_copy_lock(left, right);
-                       locks_insert_lock(before, left);
+                       locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
+                                               &fl->fl_list);
                }
                right->fl_start = request->fl_end + 1;
                locks_wake_up_blocks(right);
@@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
                locks_wake_up_blocks(left);
        }
  out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ctx->flc_lock);
        /*
         * Free any unused locks.
         */
@@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
  */
 int locks_mandatory_locked(struct file *file)
 {
+       int ret;
        struct inode *inode = file_inode(file);
+       struct file_lock_context *ctx;
        struct file_lock *fl;
 
+       ctx = inode->i_flctx;
+       if (!ctx || list_empty_careful(&ctx->flc_posix))
+               return 0;
+
        /*
         * Search the lock list for this inode for any POSIX locks.
         */
-       spin_lock(&inode->i_lock);
-       for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-               if (!IS_POSIX(fl))
-                       continue;
+       spin_lock(&ctx->flc_lock);
+       ret = 0;
+       list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
                if (fl->fl_owner != current->files &&
-                   fl->fl_owner != file)
+                   fl->fl_owner != file) {
+                       ret = -EAGAIN;
                        break;
+               }
        }
-       spin_unlock(&inode->i_lock);
-       return fl ? -EAGAIN : 0;
+       spin_unlock(&ctx->flc_lock);
+       return ret;
 }
 
 /**
@@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
 }
 
 /* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
+int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
 {
-       struct file_lock *fl = *before;
+       struct file_lock_context *flctx;
        int error = assign_type(fl, arg);
 
        if (error)
@@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
        if (arg == F_UNLCK) {
                struct file *filp = fl->fl_file;
 
+               flctx = file_inode(filp)->i_flctx;
                f_delown(filp);
                filp->f_owner.signum = 0;
                fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
                        printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
                        fl->fl_fasync = NULL;
                }
-               locks_delete_lock(before, dispose);
+               locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
        }
        return 0;
 }
@@ -1329,20 +1355,17 @@ static bool past_time(unsigned long then)
 
 static void time_out_leases(struct inode *inode, struct list_head *dispose)
 {
-       struct file_lock **before;
-       struct file_lock *fl;
+       struct file_lock_context *ctx = inode->i_flctx;
+       struct file_lock *fl, *tmp;
 
-       lockdep_assert_held(&inode->i_lock);
+       lockdep_assert_held(&ctx->flc_lock);
 
-       before = &inode->i_flock;
-       while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+       list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
                trace_time_out_leases(inode, fl);
                if (past_time(fl->fl_downgrade_time))
-                       lease_modify(before, F_RDLCK, dispose);
+                       lease_modify(fl, F_RDLCK, dispose);
                if (past_time(fl->fl_break_time))
-                       lease_modify(before, F_UNLCK, dispose);
-               if (fl == *before)      /* lease_modify may have freed fl */
-                       before = &fl->fl_next;
+                       lease_modify(fl, F_UNLCK, dispose);
        }
 }
 
@@ -1356,11 +1379,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
 static bool
 any_leases_conflict(struct inode *inode, struct file_lock *breaker)
 {
+       struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl;
 
-       lockdep_assert_held(&inode->i_lock);
+       lockdep_assert_held(&ctx->flc_lock);
 
-       for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
+       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (leases_conflict(fl, breaker))
                        return true;
        }
@@ -1384,7 +1408,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
        int error = 0;
        struct file_lock *new_fl;
-       struct file_lock *fl, **before;
+       struct file_lock_context *ctx = inode->i_flctx;
+       struct file_lock *fl;
        unsigned long break_time;
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
        LIST_HEAD(dispose);
@@ -1394,7 +1419,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                return PTR_ERR(new_fl);
        new_fl->fl_flags = type;
 
-       spin_lock(&inode->i_lock);
+       /* typically we will check that ctx is non-NULL before calling */
+       if (!ctx) {
+               WARN_ON_ONCE(1);
+               return error;
+       }
+
+       spin_lock(&ctx->flc_lock);
 
        time_out_leases(inode, &dispose);
 
@@ -1408,9 +1439,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                        break_time++;   /* so that 0 means no break time */
        }
 
-       for (before = &inode->i_flock;
-                       ((fl = *before) != NULL) && IS_LEASE(fl);
-                       before = &fl->fl_next) {
+       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (!leases_conflict(fl, new_fl))
                        continue;
                if (want_write) {
@@ -1419,17 +1448,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
                        fl->fl_flags |= FL_UNLOCK_PENDING;
                        fl->fl_break_time = break_time;
                } else {
-                       if (lease_breaking(inode->i_flock))
+                       if (lease_breaking(fl))
                                continue;
                        fl->fl_flags |= FL_DOWNGRADE_PENDING;
                        fl->fl_downgrade_time = break_time;
                }
                if (fl->fl_lmops->lm_break(fl))
-                       locks_delete_lock(before, &dispose);
+                       locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
+                                               &dispose);
        }
 
-       fl = inode->i_flock;
-       if (!fl || !IS_LEASE(fl))
+       if (list_empty(&ctx->flc_lease))
                goto out;
 
        if (mode & O_NONBLOCK) {
@@ -1439,18 +1468,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
        }
 
 restart:
-       break_time = inode->i_flock->fl_break_time;
+       fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
+       break_time = fl->fl_break_time;
        if (break_time != 0)
                break_time -= jiffies;
        if (break_time == 0)
                break_time++;
-       locks_insert_block(inode->i_flock, new_fl);
+       locks_insert_block(fl, new_fl);
        trace_break_lease_block(inode, new_fl);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ctx->flc_lock);
        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
                                                !new_fl->fl_next, break_time);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ctx->flc_lock);
        trace_break_lease_unblock(inode, new_fl);
        locks_delete_block(new_fl);
        if (error >= 0) {
@@ -1462,12 +1492,10 @@ restart:
                        time_out_leases(inode, &dispose);
                if (any_leases_conflict(inode, new_fl))
                        goto restart;
-
                error = 0;
        }
-
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ctx->flc_lock);
        locks_dispose_list(&dispose);
        locks_free_lock(new_fl);
        return error;
@@ -1487,14 +1515,18 @@ EXPORT_SYMBOL(__break_lease);
 void lease_get_mtime(struct inode *inode, struct timespec *time)
 {
        bool has_lease = false;
-       struct file_lock *flock;
+       struct file_lock_context *ctx = inode->i_flctx;
+       struct file_lock *fl;
 
-       if (inode->i_flock) {
-               spin_lock(&inode->i_lock);
-               flock = inode->i_flock;
-               if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
-                       has_lease = true;
-               spin_unlock(&inode->i_lock);
+       if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+               spin_lock(&ctx->flc_lock);
+               if (!list_empty(&ctx->flc_lease)) {
+                       fl = list_first_entry(&ctx->flc_lease,
+                                               struct file_lock, fl_list);
+                       if (fl->fl_type == F_WRLCK)
+                               has_lease = true;
+               }
+               spin_unlock(&ctx->flc_lock);
        }
 
        if (has_lease)
@@ -1532,20 +1564,22 @@ int fcntl_getlease(struct file *filp)
 {
        struct file_lock *fl;
        struct inode *inode = file_inode(filp);
+       struct file_lock_context *ctx = inode->i_flctx;
        int type = F_UNLCK;
        LIST_HEAD(dispose);
 
-       spin_lock(&inode->i_lock);
-       time_out_leases(file_inode(filp), &dispose);
-       for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
-                       fl = fl->fl_next) {
-               if (fl->fl_file == filp) {
+       if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+               spin_lock(&ctx->flc_lock);
+               time_out_leases(file_inode(filp), &dispose);
+               list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+                       if (fl->fl_file != filp)
+                               continue;
                        type = target_leasetype(fl);
                        break;
                }
+               spin_unlock(&ctx->flc_lock);
+               locks_dispose_list(&dispose);
        }
-       spin_unlock(&inode->i_lock);
-       locks_dispose_list(&dispose);
        return type;
 }
 
@@ -1578,9 +1612,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
 static int
 generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
 {
-       struct file_lock *fl, **before, **my_before = NULL, *lease;
+       struct file_lock *fl, *my_fl = NULL, *lease;
        struct dentry *dentry = filp->f_path.dentry;
        struct inode *inode = dentry->d_inode;
+       struct file_lock_context *ctx;
        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
        int error;
        LIST_HEAD(dispose);
@@ -1588,6 +1623,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        lease = *flp;
        trace_generic_add_lease(inode, lease);
 
+       ctx = locks_get_lock_context(inode);
+       if (!ctx)
+               return -ENOMEM;
+
        /*
         * In the delegation case we need mutual exclusion with
         * a number of operations that take the i_mutex.  We trylock
@@ -1606,7 +1645,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
                return -EINVAL;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ctx->flc_lock);
        time_out_leases(inode, &dispose);
        error = check_conflicting_open(dentry, arg);
        if (error)
@@ -1621,13 +1660,12 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
         * except for this filp.
         */
        error = -EAGAIN;
-       for (before = &inode->i_flock;
-                       ((fl = *before) != NULL) && IS_LEASE(fl);
-                       before = &fl->fl_next) {
+       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
                if (fl->fl_file == filp) {
-                       my_before = before;
+                       my_fl = fl;
                        continue;
                }
+
                /*
                 * No exclusive leases if someone else has a lease on
                 * this file:
@@ -1642,9 +1680,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
                        goto out;
        }
 
-       if (my_before != NULL) {
-               lease = *my_before;
-               error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
+       if (my_fl != NULL) {
+               error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
                if (error)
                        goto out;
                goto out_setup;
@@ -1654,7 +1691,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
        if (!leases_enable)
                goto out;
 
-       locks_insert_lock(before, lease);
+       locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
        /*
         * The check in break_lease() is lockless. It's possible for another
         * open to race in after we did the earlier check for a conflicting
@@ -1666,45 +1703,49 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
         */
        smp_mb();
        error = check_conflicting_open(dentry, arg);
-       if (error)
-               goto out_unlink;
+       if (error) {
+               locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+               goto out;
+       }
 
 out_setup:
        if (lease->fl_lmops->lm_setup)
                lease->fl_lmops->lm_setup(lease, priv);
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ctx->flc_lock);
        locks_dispose_list(&dispose);
        if (is_deleg)
                mutex_unlock(&inode->i_mutex);
-       if (!error && !my_before)
+       if (!error && !my_fl)
                *flp = NULL;
        return error;
-out_unlink:
-       locks_unlink_lock(before);
-       goto out;
 }
 
 static int generic_delete_lease(struct file *filp)
 {
        int error = -EAGAIN;
-       struct file_lock *fl, **before;
+       struct file_lock *fl, *victim = NULL;
        struct dentry *dentry = filp->f_path.dentry;
        struct inode *inode = dentry->d_inode;
+       struct file_lock_context *ctx = inode->i_flctx;
        LIST_HEAD(dispose);
 
-       spin_lock(&inode->i_lock);
-       time_out_leases(inode, &dispose);
-       for (before = &inode->i_flock;
-                       ((fl = *before) != NULL) && IS_LEASE(fl);
-                       before = &fl->fl_next) {
-               if (fl->fl_file == filp)
+       if (!ctx) {
+               trace_generic_delete_lease(inode, NULL);
+               return error;
+       }
+
+       spin_lock(&ctx->flc_lock);
+       list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+               if (fl->fl_file == filp) {
+                       victim = fl;
                        break;
+               }
        }
        trace_generic_delete_lease(inode, fl);
-       if (fl && IS_LEASE(fl))
-               error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
-       spin_unlock(&inode->i_lock);
+       if (victim)
+               error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+       spin_unlock(&ctx->flc_lock);
        locks_dispose_list(&dispose);
        return error;
 }
@@ -2171,7 +2212,7 @@ again:
         */
        /*
         * we need that spin_lock here - it prevents reordering between
-        * update of inode->i_flock and check for it done in close().
+        * update of i_flctx->flc_posix and check for it done in close().
         * rcu_read_lock() wouldn't do.
         */
        spin_lock(&current->files->file_lock);
@@ -2331,13 +2372,14 @@ out:
 void locks_remove_posix(struct file *filp, fl_owner_t owner)
 {
        struct file_lock lock;
+       struct file_lock_context *ctx = file_inode(filp)->i_flctx;
 
        /*
         * If there are no locks held on this file, we don't need to call
         * posix_lock_file().  Another process could be setting a lock on this
         * file at the same time, but we wouldn't remove that lock anyway.
         */
-       if (!file_inode(filp)->i_flock)
+       if (!ctx || list_empty(&ctx->flc_posix))
                return;
 
        lock.fl_type = F_UNLCK;
@@ -2358,67 +2400,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
 
 EXPORT_SYMBOL(locks_remove_posix);
 
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_flock(struct file *filp)
+{
+       struct file_lock fl = {
+               .fl_owner = filp,
+               .fl_pid = current->tgid,
+               .fl_file = filp,
+               .fl_flags = FL_FLOCK,
+               .fl_type = F_UNLCK,
+               .fl_end = OFFSET_MAX,
+       };
+       struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+
+       if (list_empty(&flctx->flc_flock))
+               return;
+
+       if (filp->f_op->flock)
+               filp->f_op->flock(filp, F_SETLKW, &fl);
+       else
+               flock_lock_file(filp, &fl);
+
+       if (fl.fl_ops && fl.fl_ops->fl_release_private)
+               fl.fl_ops->fl_release_private(&fl);
+}
+
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_lease(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       struct file_lock_context *ctx = inode->i_flctx;
+       struct file_lock *fl, *tmp;
+       LIST_HEAD(dispose);
+
+       if (list_empty(&ctx->flc_lease))
+               return;
+
+       spin_lock(&ctx->flc_lock);
+       list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+               lease_modify(fl, F_UNLCK, &dispose);
+       spin_unlock(&ctx->flc_lock);
+       locks_dispose_list(&dispose);
+}
+
 /*
  * This function is called on the last close of an open file.
  */
 void locks_remove_file(struct file *filp)
 {
-       struct inode * inode = file_inode(filp);
-       struct file_lock *fl;
-       struct file_lock **before;
-       LIST_HEAD(dispose);
-
-       if (!inode->i_flock)
+       if (!file_inode(filp)->i_flctx)
                return;
 
+       /* remove any OFD locks */
        locks_remove_posix(filp, filp);
 
-       if (filp->f_op->flock) {
-               struct file_lock fl = {
-                       .fl_owner = filp,
-                       .fl_pid = current->tgid,
-                       .fl_file = filp,
-                       .fl_flags = FL_FLOCK,
-                       .fl_type = F_UNLCK,
-                       .fl_end = OFFSET_MAX,
-               };
-               filp->f_op->flock(filp, F_SETLKW, &fl);
-               if (fl.fl_ops && fl.fl_ops->fl_release_private)
-                       fl.fl_ops->fl_release_private(&fl);
-       }
-
-       spin_lock(&inode->i_lock);
-       before = &inode->i_flock;
-
-       while ((fl = *before) != NULL) {
-               if (fl->fl_file == filp) {
-                       if (IS_LEASE(fl)) {
-                               lease_modify(before, F_UNLCK, &dispose);
-                               continue;
-                       }
-
-                       /*
-                        * There's a leftover lock on the list of a type that
-                        * we didn't expect to see. Most likely a classic
-                        * POSIX lock that ended up not getting released
-                        * properly, or that raced onto the list somehow. Log
-                        * some info about it and then just remove it from
-                        * the list.
-                        */
-                       WARN(!IS_FLOCK(fl),
-                               "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
-                               MAJOR(inode->i_sb->s_dev),
-                               MINOR(inode->i_sb->s_dev), inode->i_ino,
-                               fl->fl_type, fl->fl_flags,
-                               fl->fl_start, fl->fl_end);
+       /* remove flock locks */
+       locks_remove_flock(filp);
 
-                       locks_delete_lock(before, &dispose);
-                       continue;
-               }
-               before = &fl->fl_next;
-       }
-       spin_unlock(&inode->i_lock);
-       locks_dispose_list(&dispose);
+       /* remove any leases */
+       locks_remove_lease(filp);
 }
 
 /**
@@ -2621,6 +2663,9 @@ static int __init filelock_init(void)
 {
        int i;
 
+       flctx_cache = kmem_cache_create("file_lock_ctx",
+                       sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
        filelock_cache = kmem_cache_create("file_lock_cache",
                        sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
index 7f3f606..8cdb2b2 100644 (file)
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 {
        struct inode *inode = state->inode;
        struct file_lock *fl;
+       struct file_lock_context *flctx = inode->i_flctx;
+       struct list_head *list;
        int status = 0;
 
-       if (inode->i_flock == NULL)
+       if (flctx == NULL)
                goto out;
 
-       /* Protect inode->i_flock using the i_lock */
-       spin_lock(&inode->i_lock);
-       for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-               if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                       continue;
+       list = &flctx->flc_posix;
+       spin_lock(&flctx->flc_lock);
+restart:
+       list_for_each_entry(fl, list, fl_list) {
                if (nfs_file_open_context(fl->fl_file) != ctx)
                        continue;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&flctx->flc_lock);
                status = nfs4_lock_delegation_recall(fl, state, stateid);
                if (status < 0)
                        goto out;
-               spin_lock(&inode->i_lock);
+               spin_lock(&flctx->flc_lock);
        }
-       spin_unlock(&inode->i_lock);
+       if (list == &flctx->flc_posix) {
+               list = &flctx->flc_flock;
+               goto restart;
+       }
+       spin_unlock(&flctx->flc_lock);
 out:
        return status;
 }
index 5194933..a3bb22a 100644 (file)
@@ -1366,49 +1366,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
        struct nfs_inode *nfsi = NFS_I(inode);
        struct file_lock *fl;
        int status = 0;
+       struct file_lock_context *flctx = inode->i_flctx;
+       struct list_head *list;
 
-       if (inode->i_flock == NULL)
+       if (flctx == NULL)
                return 0;
 
+       list = &flctx->flc_posix;
+
        /* Guard against delegation returns and new lock/unlock calls */
        down_write(&nfsi->rwsem);
-       /* Protect inode->i_flock using the BKL */
-       spin_lock(&inode->i_lock);
-       for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-               if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                       continue;
+       spin_lock(&flctx->flc_lock);
+restart:
+       list_for_each_entry(fl, list, fl_list) {
                if (nfs_file_open_context(fl->fl_file)->state != state)
                        continue;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&flctx->flc_lock);
                status = ops->recover_lock(state, fl);
                switch (status) {
-                       case 0:
-                               break;
-                       case -ESTALE:
-                       case -NFS4ERR_ADMIN_REVOKED:
-                       case -NFS4ERR_STALE_STATEID:
-                       case -NFS4ERR_BAD_STATEID:
-                       case -NFS4ERR_EXPIRED:
-                       case -NFS4ERR_NO_GRACE:
-                       case -NFS4ERR_STALE_CLIENTID:
-                       case -NFS4ERR_BADSESSION:
-                       case -NFS4ERR_BADSLOT:
-                       case -NFS4ERR_BAD_HIGH_SLOT:
-                       case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
-                               goto out;
-                       default:
-                               printk(KERN_ERR "NFS: %s: unhandled error %d\n",
-                                        __func__, status);
-                       case -ENOMEM:
-                       case -NFS4ERR_DENIED:
-                       case -NFS4ERR_RECLAIM_BAD:
-                       case -NFS4ERR_RECLAIM_CONFLICT:
-                               /* kill_proc(fl->fl_pid, SIGLOST, 1); */
-                               status = 0;
+               case 0:
+                       break;
+               case -ESTALE:
+               case -NFS4ERR_ADMIN_REVOKED:
+               case -NFS4ERR_STALE_STATEID:
+               case -NFS4ERR_BAD_STATEID:
+               case -NFS4ERR_EXPIRED:
+               case -NFS4ERR_NO_GRACE:
+               case -NFS4ERR_STALE_CLIENTID:
+               case -NFS4ERR_BADSESSION:
+               case -NFS4ERR_BADSLOT:
+               case -NFS4ERR_BAD_HIGH_SLOT:
+               case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+                       goto out;
+               default:
+                       pr_err("NFS: %s: unhandled error %d\n",
+                                       __func__, status);
+               case -ENOMEM:
+               case -NFS4ERR_DENIED:
+               case -NFS4ERR_RECLAIM_BAD:
+               case -NFS4ERR_RECLAIM_CONFLICT:
+                       /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+                       status = 0;
                }
-               spin_lock(&inode->i_lock);
+               spin_lock(&flctx->flc_lock);
        }
-       spin_unlock(&inode->i_lock);
+       if (list == &flctx->flc_posix) {
+               list = &flctx->flc_flock;
+               goto restart;
+       }
+       spin_unlock(&flctx->flc_lock);
 out:
        up_write(&nfsi->rwsem);
        return status;
index 2b5e769..29c7f33 100644 (file)
@@ -826,11 +826,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                      struct nfs_pageio_descriptor *pgio)
 {
        size_t size;
+       struct file_lock_context *flctx;
 
        if (prev) {
                if (!nfs_match_open_context(req->wb_context, prev->wb_context))
                        return false;
-               if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+               flctx = req->wb_context->dentry->d_inode->i_flctx;
+               if (flctx != NULL &&
+                   !(list_empty_careful(&flctx->flc_posix) &&
+                     list_empty_careful(&flctx->flc_flock)) &&
                    !nfs_match_lock_context(req->wb_lock_context,
                                            prev->wb_lock_context))
                        return false;
index af3af68..4ae66f4 100644 (file)
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_lock_context *l_ctx;
+       struct file_lock_context *flctx = file_inode(file)->i_flctx;
        struct nfs_page *req;
        int do_flush, status;
        /*
@@ -1109,7 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
                do_flush = req->wb_page != page || req->wb_context != ctx;
                /* for now, flush if more than 1 request in page_group */
                do_flush |= req->wb_this_page != req;
-               if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+               if (l_ctx && flctx &&
+                   !(list_empty_careful(&flctx->flc_posix) &&
+                     list_empty_careful(&flctx->flc_flock))) {
                        do_flush |= l_ctx->lockowner.l_owner != current->files
                                || l_ctx->lockowner.l_pid != current->tgid;
                }
@@ -1170,6 +1173,13 @@ out:
        return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+       return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+                       fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1190,36 @@ out:
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+       int ret;
+       struct file_lock_context *flctx = inode->i_flctx;
+       struct file_lock *fl;
+
        if (file->f_flags & O_DSYNC)
                return 0;
        if (!nfs_write_pageuptodate(page, inode))
                return 0;
        if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                return 1;
-       if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-                       inode->i_flock->fl_end == OFFSET_MAX &&
-                       inode->i_flock->fl_type != F_RDLCK))
-               return 1;
-       return 0;
+       if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+                      list_empty_careful(&flctx->flc_posix)))
+               return 0;
+
+       /* Check to see if there are whole file write locks */
+       ret = 0;
+       spin_lock(&flctx->flc_lock);
+       if (!list_empty(&flctx->flc_posix)) {
+               fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+                                       fl_list);
+               if (is_whole_file_wrlock(fl))
+                       ret = 1;
+       } else if (!list_empty(&flctx->flc_flock)) {
+               fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+                                       fl_list);
+               if (fl->fl_type == F_WRLCK)
+                       ret = 1;
+       }
+       spin_unlock(&flctx->flc_lock);
+       return ret;
 }
 
 /*
index c06a1ba..532a60c 100644 (file)
@@ -3477,7 +3477,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
 }
 
 static int
-nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
+nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+                    struct list_head *dispose)
 {
        if (arg & F_UNLCK)
                return lease_modify(onlist, arg, dispose);
@@ -5556,10 +5557,11 @@ out_nfserr:
 static bool
 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
-       struct file_lock **flpp;
+       struct file_lock *fl;
        int status = false;
        struct file *filp = find_any_file(fp);
        struct inode *inode;
+       struct file_lock_context *flctx;
 
        if (!filp) {
                /* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5570,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
        }
 
        inode = file_inode(filp);
+       flctx = inode->i_flctx;
 
-       spin_lock(&inode->i_lock);
-       for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
-               if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
-                       status = true;
-                       break;
+       if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+               spin_lock(&flctx->flc_lock);
+               list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+                       if (fl->fl_owner == (fl_owner_t)lowner) {
+                               status = true;
+                               break;
+                       }
                }
+               spin_unlock(&flctx->flc_lock);
        }
-       spin_unlock(&inode->i_lock);
        fput(filp);
        return status;
 }
index 1eae330..b6d5133 100644 (file)
@@ -48,6 +48,7 @@ struct ocfs2_quota_recovery {
 /* In-memory structure with quota header information */
 struct ocfs2_mem_dqinfo {
        unsigned int dqi_type;          /* Quota type this structure describes */
+       unsigned int dqi_flags;         /* Flags OLQF_* */
        unsigned int dqi_chunks;        /* Number of chunks in local quota file */
        unsigned int dqi_blocks;        /* Number of blocks allocated for local quota file */
        unsigned int dqi_syncms;        /* How often should we sync with other nodes */
index bb72af3..3d0b63d 100644 (file)
@@ -286,7 +286,7 @@ static void olq_update_info(struct buffer_head *bh, void *private)
        ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
                                                OCFS2_LOCAL_INFO_OFF);
        spin_lock(&dq_data_lock);
-       ldinfo->dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+       ldinfo->dqi_flags = cpu_to_le32(oinfo->dqi_flags);
        ldinfo->dqi_chunks = cpu_to_le32(oinfo->dqi_chunks);
        ldinfo->dqi_blocks = cpu_to_le32(oinfo->dqi_blocks);
        spin_unlock(&dq_data_lock);
@@ -695,8 +695,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        /* We don't need the lock and we have to acquire quota file locks
         * which will later depend on this lock */
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
-       info->dqi_maxblimit = 0x7fffffffffffffffLL;
-       info->dqi_maxilimit = 0x7fffffffffffffffLL;
+       info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
+       info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
        oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
        if (!oinfo) {
                mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
@@ -731,13 +731,13 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        }
        ldinfo = (struct ocfs2_local_disk_dqinfo *)(bh->b_data +
                                                OCFS2_LOCAL_INFO_OFF);
-       info->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
+       oinfo->dqi_flags = le32_to_cpu(ldinfo->dqi_flags);
        oinfo->dqi_chunks = le32_to_cpu(ldinfo->dqi_chunks);
        oinfo->dqi_blocks = le32_to_cpu(ldinfo->dqi_blocks);
        oinfo->dqi_libh = bh;
 
        /* We crashed when using local quota file? */
-       if (!(info->dqi_flags & OLQF_CLEAN)) {
+       if (!(oinfo->dqi_flags & OLQF_CLEAN)) {
                rec = OCFS2_SB(sb)->quota_rec;
                if (!rec) {
                        rec = ocfs2_alloc_quota_recovery();
@@ -766,7 +766,7 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
        }
 
        /* Now mark quota file as used */
-       info->dqi_flags &= ~OLQF_CLEAN;
+       oinfo->dqi_flags &= ~OLQF_CLEAN;
        status = ocfs2_modify_bh(lqinode, bh, olq_update_info, info);
        if (status < 0) {
                mlog_errno(status);
@@ -851,7 +851,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
                goto out;
 
        /* Mark local file as clean */
-       info->dqi_flags |= OLQF_CLEAN;
+       oinfo->dqi_flags |= OLQF_CLEAN;
        status = ocfs2_modify_bh(sb_dqopt(sb)->files[type],
                                 oinfo->dqi_libh,
                                 olq_update_info,
index c09d6da..87a1f76 100644 (file)
@@ -1002,36 +1002,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
        }
 }
 
-/* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
-{
-       unsigned int feature[OCFS2_MAXQUOTAS] = {
-                                       OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
-                                       OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
-
-       if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type]))
-               return -EINVAL;
-
-       return dquot_enable(sb_dqopt(sb)->files[type], type,
-                           format_id, DQUOT_LIMITS_ENABLED);
-}
-
-/* Handle quota off quotactl */
-static int ocfs2_quota_off(struct super_block *sb, int type)
-{
-       return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
-}
-
-static const struct quotactl_ops ocfs2_quotactl_ops = {
-       .quota_on_meta  = ocfs2_quota_on,
-       .quota_off      = ocfs2_quota_off,
-       .quota_sync     = dquot_quota_sync,
-       .get_info       = dquot_get_dqinfo,
-       .set_info       = dquot_set_dqinfo,
-       .get_dqblk      = dquot_get_dqblk,
-       .set_dqblk      = dquot_set_dqblk,
-};
-
 static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct dentry *root;
@@ -2087,7 +2057,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        sb->s_op = &ocfs2_sops;
        sb->s_d_op = &ocfs2_dentry_ops;
        sb->s_export_op = &ocfs2_export_ops;
-       sb->s_qcop = &ocfs2_quotactl_ops;
+       sb->s_qcop = &dquot_quotactl_sysfile_ops;
        sb->dq_op = &ocfs2_quota_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
        sb->s_xattr = ocfs2_xattr_handlers;
index 69df5b2..0ccd4ba 100644 (file)
@@ -1248,7 +1248,7 @@ static int ignore_hardlimit(struct dquot *dquot)
 
        return capable(CAP_SYS_RESOURCE) &&
               (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
-               !(info->dqi_flags & V1_DQF_RSQUASH));
+               !(info->dqi_flags & DQF_ROOT_SQUASH));
 }
 
 /* needs dq_data_lock */
@@ -2385,14 +2385,84 @@ out:
 }
 EXPORT_SYMBOL(dquot_quota_on_mount);
 
-static inline qsize_t qbtos(qsize_t blocks)
+static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
 {
-       return blocks << QIF_DQBLKSIZE_BITS;
+       int ret;
+       int type;
+       struct quota_info *dqopt = sb_dqopt(sb);
+
+       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+               return -ENOSYS;
+       /* Accounting cannot be turned on while fs is mounted */
+       flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
+       if (!flags)
+               return -EINVAL;
+       for (type = 0; type < MAXQUOTAS; type++) {
+               if (!(flags & qtype_enforce_flag(type)))
+                       continue;
+               /* Can't enforce without accounting */
+               if (!sb_has_quota_usage_enabled(sb, type))
+                       return -EINVAL;
+               ret = dquot_enable(dqopt->files[type], type,
+                                  dqopt->info[type].dqi_fmt_id,
+                                  DQUOT_LIMITS_ENABLED);
+               if (ret < 0)
+                       goto out_err;
+       }
+       return 0;
+out_err:
+       /* Backout enforcement enablement we already did */
+       for (type--; type >= 0; type--)  {
+               if (flags & qtype_enforce_flag(type))
+                       dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+       }
+       /* Error code translation for better compatibility with XFS */
+       if (ret == -EBUSY)
+               ret = -EEXIST;
+       return ret;
 }
 
-static inline qsize_t stoqb(qsize_t space)
+static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
 {
-       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+       int ret;
+       int type;
+       struct quota_info *dqopt = sb_dqopt(sb);
+
+       if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+               return -ENOSYS;
+       /*
+        * We don't support turning off accounting via quotactl. In principle
+        * quota infrastructure can do this but filesystems don't expect
+        * userspace to be able to do it.
+        */
+       if (flags &
+                 (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
+               return -EOPNOTSUPP;
+
+       /* Filter out limits not enabled */
+       for (type = 0; type < MAXQUOTAS; type++)
+               if (!sb_has_quota_limits_enabled(sb, type))
+                       flags &= ~qtype_enforce_flag(type);
+       /* Nothing left? */
+       if (!flags)
+               return -EEXIST;
+       for (type = 0; type < MAXQUOTAS; type++) {
+               if (flags & qtype_enforce_flag(type)) {
+                       ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+                       if (ret < 0)
+                               goto out_err;
+               }
+       }
+       return 0;
+out_err:
+       /* Backout enforcement disabling we already did */
+       for (type--; type >= 0; type--)  {
+               if (flags & qtype_enforce_flag(type))
+                       dquot_enable(dqopt->files[type], type,
+                                    dqopt->info[type].dqi_fmt_id,
+                                    DQUOT_LIMITS_ENABLED);
+       }
+       return ret;
 }
 
 /* Generic routine for getting common part of quota structure */
@@ -2444,13 +2514,13 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
                return -EINVAL;
 
        if (((di->d_fieldmask & QC_SPC_SOFT) &&
-            stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+            di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_SPC_HARD) &&
-            stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+            di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
            ((di->d_fieldmask & QC_INO_SOFT) &&
-            (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+            (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
            ((di->d_fieldmask & QC_INO_HARD) &&
-            (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+            (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
                return -ERANGE;
 
        spin_lock(&dq_data_lock);
@@ -2577,6 +2647,14 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
                goto out;
        }
        mi = sb_dqopt(sb)->info + type;
+       if (ii->dqi_valid & IIF_FLAGS) {
+               if (ii->dqi_flags & ~DQF_SETINFO_MASK ||
+                   (ii->dqi_flags & DQF_ROOT_SQUASH &&
+                    mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) {
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
        spin_lock(&dq_data_lock);
        if (ii->dqi_valid & IIF_BGRACE)
                mi->dqi_bgrace = ii->dqi_bgrace;
@@ -2606,6 +2684,17 @@ const struct quotactl_ops dquot_quotactl_ops = {
 };
 EXPORT_SYMBOL(dquot_quotactl_ops);
 
+const struct quotactl_ops dquot_quotactl_sysfile_ops = {
+       .quota_enable   = dquot_quota_enable,
+       .quota_disable  = dquot_quota_disable,
+       .quota_sync     = dquot_quota_sync,
+       .get_info       = dquot_get_dqinfo,
+       .set_info       = dquot_set_dqinfo,
+       .get_dqblk      = dquot_get_dqblk,
+       .set_dqblk      = dquot_set_dqblk
+};
+EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
+
 static int do_proc_dqstats(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
index 6f38563..d14a799 100644 (file)
@@ -66,18 +66,40 @@ static int quota_sync_all(int type)
        return ret;
 }
 
+unsigned int qtype_enforce_flag(int type)
+{
+       switch (type) {
+       case USRQUOTA:
+               return FS_QUOTA_UDQ_ENFD;
+       case GRPQUOTA:
+               return FS_QUOTA_GDQ_ENFD;
+       case PRJQUOTA:
+               return FS_QUOTA_PDQ_ENFD;
+       }
+       return 0;
+}
+
 static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
                         struct path *path)
 {
-       if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+       if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
                return -ENOSYS;
-       if (sb->s_qcop->quota_on_meta)
-               return sb->s_qcop->quota_on_meta(sb, type, id);
+       if (sb->s_qcop->quota_enable)
+               return sb->s_qcop->quota_enable(sb, qtype_enforce_flag(type));
        if (IS_ERR(path))
                return PTR_ERR(path);
        return sb->s_qcop->quota_on(sb, type, id, path);
 }
 
+static int quota_quotaoff(struct super_block *sb, int type)
+{
+       if (!sb->s_qcop->quota_off && !sb->s_qcop->quota_disable)
+               return -ENOSYS;
+       if (sb->s_qcop->quota_disable)
+               return sb->s_qcop->quota_disable(sb, qtype_enforce_flag(type));
+       return sb->s_qcop->quota_off(sb, type);
+}
+
 static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
 {
        __u32 fmt;
@@ -208,15 +230,26 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
        return sb->s_qcop->set_dqblk(sb, qid, &fdq);
 }
 
-static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
+static int quota_enable(struct super_block *sb, void __user *addr)
 {
        __u32 flags;
 
        if (copy_from_user(&flags, addr, sizeof(flags)))
                return -EFAULT;
-       if (!sb->s_qcop->set_xstate)
+       if (!sb->s_qcop->quota_enable)
                return -ENOSYS;
-       return sb->s_qcop->set_xstate(sb, flags, cmd);
+       return sb->s_qcop->quota_enable(sb, flags);
+}
+
+static int quota_disable(struct super_block *sb, void __user *addr)
+{
+       __u32 flags;
+
+       if (copy_from_user(&flags, addr, sizeof(flags)))
+               return -EFAULT;
+       if (!sb->s_qcop->quota_disable)
+               return -ENOSYS;
+       return sb->s_qcop->quota_disable(sb, flags);
 }
 
 static int quota_getxstate(struct super_block *sb, void __user *addr)
@@ -429,9 +462,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
        case Q_QUOTAON:
                return quota_quotaon(sb, type, cmd, id, path);
        case Q_QUOTAOFF:
-               if (!sb->s_qcop->quota_off)
-                       return -ENOSYS;
-               return sb->s_qcop->quota_off(sb, type);
+               return quota_quotaoff(sb, type);
        case Q_GETFMT:
                return quota_getfmt(sb, type, addr);
        case Q_GETINFO:
@@ -447,8 +478,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                        return -ENOSYS;
                return sb->s_qcop->quota_sync(sb, type);
        case Q_XQUOTAON:
+               return quota_enable(sb, addr);
        case Q_XQUOTAOFF:
-               return quota_setxstate(sb, cmd, addr);
+               return quota_disable(sb, addr);
        case Q_XQUOTARM:
                return quota_rmxquota(sb, addr);
        case Q_XGETQSTAT:
index 469c684..8fe79be 100644 (file)
@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
        }
        ret = 0;
        /* limits are stored as unsigned 32-bit data */
-       dqopt->info[type].dqi_maxblimit = 0xffffffff;
-       dqopt->info[type].dqi_maxilimit = 0xffffffff;
+       dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+       dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
        dqopt->info[type].dqi_igrace =
                        dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
        dqopt->info[type].dqi_bgrace =
index 02751ec..9cb10d7 100644 (file)
@@ -117,16 +117,17 @@ static int v2_read_file_info(struct super_block *sb, int type)
        qinfo = info->dqi_priv;
        if (version == 0) {
                /* limits are stored as unsigned 32-bit data */
-               info->dqi_maxblimit = 0xffffffff;
-               info->dqi_maxilimit = 0xffffffff;
+               info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+               info->dqi_max_ino_limit = 0xffffffff;
        } else {
-               /* used space is stored as unsigned 64-bit value */
-               info->dqi_maxblimit = 0xffffffffffffffffULL;    /* 2^64-1 */
-               info->dqi_maxilimit = 0xffffffffffffffffULL;
+               /* used space is stored as unsigned 64-bit value in bytes */
+               info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
+               info->dqi_max_ino_limit = 0xffffffffffffffffULL;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
-       info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
+       /* No flags currently supported */
+       info->dqi_flags = 0;
        qinfo->dqi_sb = sb;
        qinfo->dqi_type = type;
        qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
@@ -157,7 +158,8 @@ static int v2_write_file_info(struct super_block *sb, int type)
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
-       dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK);
+       /* No flags currently supported */
+       dinfo.dqi_flags = cpu_to_le32(0);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk);
index c0805c9..4060691 100644 (file)
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
                        return retval;
        }
 
-       if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+       if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
                retval = locks_mandatory_area(
                        read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
                        inode, file, pos, count);
index 0e0e99b..c6e17a7 100644 (file)
@@ -2,10 +2,12 @@ config UDF_FS
        tristate "UDF file system support"
        select CRC_ITU_T
        help
-         This is the new file system used on some CD-ROMs and DVDs. Say Y if
-         you intend to mount DVD discs or CDRW's written in packet mode, or
-         if written to by other UDF utilities, such as DirectCD.
-         Please read <file:Documentation/filesystems/udf.txt>.
+         This is a file system used on some CD-ROMs and DVDs. Since the
+         file system is supported by multiple operating systems and is more
+         compatible with standard unix file systems, it is also suitable for
+         removable USB disks. Say Y if you intend to mount DVD discs or CDRW's
+         written in packet mode, or if you want to use UDF for removable USB
+         disks. Please read <file:Documentation/filesystems/udf.txt>.
 
          To compile this file system support as a module, choose M here: the
          module will be called udf.
index 5bc71d9..a445d59 100644 (file)
@@ -750,7 +750,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
        /* Are we beyond EOF? */
        if (etype == -1) {
                int ret;
-               isBeyondEOF = 1;
+               isBeyondEOF = true;
                if (count) {
                        if (c)
                                laarr[0] = laarr[1];
@@ -792,7 +792,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                endnum = c + 1;
                lastblock = 1;
        } else {
-               isBeyondEOF = 0;
+               isBeyondEOF = false;
                endnum = startnum = ((count > 2) ? 2 : count);
 
                /* if the current extent is in position 0,
@@ -1288,6 +1288,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
        struct kernel_lb_addr *iloc = &iinfo->i_location;
        unsigned int link_count;
        unsigned int indirections = 0;
+       int bs = inode->i_sb->s_blocksize;
        int ret = -EIO;
 
 reread:
@@ -1374,38 +1375,35 @@ reread:
        if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
                iinfo->i_efe = 1;
                iinfo->i_use = 0;
-               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+               ret = udf_alloc_i_data(inode, bs -
                                        sizeof(struct extendedFileEntry));
                if (ret)
                        goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct extendedFileEntry),
-                      inode->i_sb->s_blocksize -
-                                       sizeof(struct extendedFileEntry));
+                      bs - sizeof(struct extendedFileEntry));
        } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
                iinfo->i_efe = 0;
                iinfo->i_use = 0;
-               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
-                                               sizeof(struct fileEntry));
+               ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
                if (ret)
                        goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct fileEntry),
-                      inode->i_sb->s_blocksize - sizeof(struct fileEntry));
+                      bs - sizeof(struct fileEntry));
        } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
                iinfo->i_efe = 0;
                iinfo->i_use = 1;
                iinfo->i_lenAlloc = le32_to_cpu(
                                ((struct unallocSpaceEntry *)bh->b_data)->
                                 lengthAllocDescs);
-               ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize -
+               ret = udf_alloc_i_data(inode, bs -
                                        sizeof(struct unallocSpaceEntry));
                if (ret)
                        goto out;
                memcpy(iinfo->i_ext.i_data,
                       bh->b_data + sizeof(struct unallocSpaceEntry),
-                      inode->i_sb->s_blocksize -
-                                       sizeof(struct unallocSpaceEntry));
+                      bs - sizeof(struct unallocSpaceEntry));
                return 0;
        }
 
@@ -1489,6 +1487,15 @@ reread:
        }
        inode->i_generation = iinfo->i_unique;
 
+       /*
+        * Sanity check length of allocation descriptors and extended attrs to
+        * avoid integer overflows
+        */
+       if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
+               goto out;
+       /* Now do exact checks */
+       if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
+               goto out;
        /* Sanity checks for files in ICB so that we don't get confused later */
        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
                /*
@@ -1498,8 +1505,7 @@ reread:
                if (iinfo->i_lenAlloc != inode->i_size)
                        goto out;
                /* File in ICB has to fit in there... */
-               if (inode->i_size > inode->i_sb->s_blocksize -
-                                       udf_file_entry_alloc_offset(inode))
+               if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
                        goto out;
        }
 
index 3ccb2f1..f169411 100644 (file)
@@ -1599,7 +1599,7 @@ static noinline int udf_process_sequence(
        struct udf_vds_record *curr;
        struct generic_desc *gd;
        struct volDescPtr *vdp;
-       int done = 0;
+       bool done = false;
        uint32_t vdsn;
        uint16_t ident;
        long next_s = 0, next_e = 0;
@@ -1680,7 +1680,7 @@ static noinline int udf_process_sequence(
                                lastblock = next_e;
                                next_s = next_e = 0;
                        } else
-                               done = 1;
+                               done = true;
                        break;
                }
                brelse(bh);
@@ -2300,6 +2300,7 @@ static void udf_put_super(struct super_block *sb)
                udf_close_lvid(sb);
        brelse(sbi->s_lvid_bh);
        udf_sb_free_partitions(sb);
+       mutex_destroy(&sbi->s_alloc_mutex);
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
 }
index 53e95b2..a7a3a63 100644 (file)
@@ -91,16 +91,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
        return ptr;
 }
 
-void
-kmem_free(const void *ptr)
-{
-       if (!is_vmalloc_addr(ptr)) {
-               kfree(ptr);
-       } else {
-               vfree(ptr);
-       }
-}
-
 void *
 kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
             xfs_km_flags_t flags)
index 64db0e5..cc6b768 100644 (file)
@@ -63,7 +63,10 @@ kmem_flags_convert(xfs_km_flags_t flags)
 extern void *kmem_alloc(size_t, xfs_km_flags_t);
 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
-extern void  kmem_free(const void *);
+static inline void  kmem_free(const void *ptr)
+{
+       kvfree(ptr);
+}
 
 
 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
index 5d38e8b..15105db 100644 (file)
@@ -403,7 +403,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
                if (!xfs_sb_version_hasattr2(&mp->m_sb)) {
                        xfs_sb_version_addattr2(&mp->m_sb);
                        spin_unlock(&mp->m_sb_lock);
-                       xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+                       xfs_log_sb(tp);
                } else
                        spin_unlock(&mp->m_sb_lock);
        }
index b5eb474..61ec015 100644 (file)
@@ -973,7 +973,11 @@ xfs_bmap_local_to_extents(
        *firstblock = args.fsbno;
        bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
 
-       /* initialise the block and copy the data */
+       /*
+        * Initialise the block and copy the data
+        *
+        * Note: init_fn must set the buffer log item type correctly!
+        */
        init_fn(tp, bp, ip, ifp);
 
        /* account for the change in fork size and log everything */
@@ -1221,22 +1225,20 @@ xfs_bmap_add_attrfork(
                goto bmap_cancel;
        if (!xfs_sb_version_hasattr(&mp->m_sb) ||
           (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
-               __int64_t sbfields = 0;
+               bool log_sb = false;
 
                spin_lock(&mp->m_sb_lock);
                if (!xfs_sb_version_hasattr(&mp->m_sb)) {
                        xfs_sb_version_addattr(&mp->m_sb);
-                       sbfields |= XFS_SB_VERSIONNUM;
+                       log_sb = true;
                }
                if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
                        xfs_sb_version_addattr2(&mp->m_sb);
-                       sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
+                       log_sb = true;
                }
-               if (sbfields) {
-                       spin_unlock(&mp->m_sb_lock);
-                       xfs_mod_sb(tp, sbfields);
-               } else
-                       spin_unlock(&mp->m_sb_lock);
+               spin_unlock(&mp->m_sb_lock);
+               if (log_sb)
+                       xfs_log_sb(tp);
        }
 
        error = xfs_bmap_finish(&tp, &flist, &committed);
index 44db6db..b9d8a49 100644 (file)
@@ -27,6 +27,37 @@ struct xfs_trans;
 
 extern kmem_zone_t     *xfs_bmap_free_item_zone;
 
+/*
+ * Argument structure for xfs_bmap_alloc.
+ */
+struct xfs_bmalloca {
+       xfs_fsblock_t           *firstblock; /* i/o first block allocated */
+       struct xfs_bmap_free    *flist; /* bmap freelist */
+       struct xfs_trans        *tp;    /* transaction pointer */
+       struct xfs_inode        *ip;    /* incore inode pointer */
+       struct xfs_bmbt_irec    prev;   /* extent before the new one */
+       struct xfs_bmbt_irec    got;    /* extent after, or delayed */
+
+       xfs_fileoff_t           offset; /* offset in file filling in */
+       xfs_extlen_t            length; /* i/o length asked/allocated */
+       xfs_fsblock_t           blkno;  /* starting block of new extent */
+
+       struct xfs_btree_cur    *cur;   /* btree cursor */
+       xfs_extnum_t            idx;    /* current extent index */
+       int                     nallocs;/* number of extents alloc'd */
+       int                     logflags;/* flags for transaction logging */
+
+       xfs_extlen_t            total;  /* total blocks needed for xaction */
+       xfs_extlen_t            minlen; /* minimum allocation size (blocks) */
+       xfs_extlen_t            minleft; /* amount must be left after alloc */
+       bool                    eof;    /* set if allocating past last extent */
+       bool                    wasdel; /* replacing a delayed allocation */
+       bool                    userdata;/* set if is user data */
+       bool                    aeof;   /* allocated space at eof */
+       bool                    conv;   /* overwriting unwritten extents */
+       int                     flags;
+};
+
 /*
  * List of extents to be free "later".
  * The list is kept sorted on xbf_startblock.
@@ -149,6 +180,8 @@ void        xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
 void   xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
                struct xfs_bmap_free *flist, struct xfs_mount *mp);
 void   xfs_bmap_cancel(struct xfs_bmap_free *flist);
+int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
+                       int *committed);
 void   xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
 int    xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
index fbd6da2..8eb7189 100644 (file)
@@ -151,10 +151,13 @@ typedef struct xfs_sb {
        __uint32_t      sb_features2;   /* additional feature bits */
 
        /*
-        * bad features2 field as a result of failing to pad the sb
-        * structure to 64 bits. Some machines will be using this field
-        * for features2 bits. Easiest just to mark it bad and not use
-        * it for anything else.
+        * bad features2 field as a result of failing to pad the sb structure to
+        * 64 bits. Some machines will be using this field for features2 bits.
+        * Easiest just to mark it bad and not use it for anything else.
+        *
+        * This is not kept up to date in memory; it is always overwritten by
+        * the value in sb_features2 when formatting the incore superblock to
+        * the disk buffer.
         */
        __uint32_t      sb_bad_features2;
 
@@ -304,8 +307,8 @@ typedef enum {
 #define XFS_SB_ICOUNT          XFS_SB_MVAL(ICOUNT)
 #define XFS_SB_IFREE           XFS_SB_MVAL(IFREE)
 #define XFS_SB_FDBLOCKS                XFS_SB_MVAL(FDBLOCKS)
-#define XFS_SB_FEATURES2       XFS_SB_MVAL(FEATURES2)
-#define XFS_SB_BAD_FEATURES2   XFS_SB_MVAL(BAD_FEATURES2)
+#define XFS_SB_FEATURES2       (XFS_SB_MVAL(FEATURES2) | \
+                                XFS_SB_MVAL(BAD_FEATURES2))
 #define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
 #define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
 #define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
@@ -319,9 +322,9 @@ typedef enum {
         XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
         XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
         XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
-        XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
-        XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
-        XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
+        XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
+        XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
+        XFS_SB_PQUOTINO)
 
 
 /*
@@ -453,13 +456,11 @@ static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
 {
        sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
        sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
-       sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
 }
 
 static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
 {
        sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
-       sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
        if (!sbp->sb_features2)
                sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
 }
@@ -475,7 +476,6 @@ static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
 {
        sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
        sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
-       sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
 }
 
 /*
similarity index 100%
rename from fs/xfs/xfs_fs.h
rename to fs/xfs/libxfs/xfs_fs.h
index 752915f..b0a5fe9 100644 (file)
  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
  */
 
-static const struct {
-       short offset;
-       short type;     /* 0 = integer
-                        * 1 = binary / string (no translation)
-                        */
-} xfs_sb_info[] = {
-       { offsetof(xfs_sb_t, sb_magicnum),      0 },
-       { offsetof(xfs_sb_t, sb_blocksize),     0 },
-       { offsetof(xfs_sb_t, sb_dblocks),       0 },
-       { offsetof(xfs_sb_t, sb_rblocks),       0 },
-       { offsetof(xfs_sb_t, sb_rextents),      0 },
-       { offsetof(xfs_sb_t, sb_uuid),          1 },
-       { offsetof(xfs_sb_t, sb_logstart),      0 },
-       { offsetof(xfs_sb_t, sb_rootino),       0 },
-       { offsetof(xfs_sb_t, sb_rbmino),        0 },
-       { offsetof(xfs_sb_t, sb_rsumino),       0 },
-       { offsetof(xfs_sb_t, sb_rextsize),      0 },
-       { offsetof(xfs_sb_t, sb_agblocks),      0 },
-       { offsetof(xfs_sb_t, sb_agcount),       0 },
-       { offsetof(xfs_sb_t, sb_rbmblocks),     0 },
-       { offsetof(xfs_sb_t, sb_logblocks),     0 },
-       { offsetof(xfs_sb_t, sb_versionnum),    0 },
-       { offsetof(xfs_sb_t, sb_sectsize),      0 },
-       { offsetof(xfs_sb_t, sb_inodesize),     0 },
-       { offsetof(xfs_sb_t, sb_inopblock),     0 },
-       { offsetof(xfs_sb_t, sb_fname[0]),      1 },
-       { offsetof(xfs_sb_t, sb_blocklog),      0 },
-       { offsetof(xfs_sb_t, sb_sectlog),       0 },
-       { offsetof(xfs_sb_t, sb_inodelog),      0 },
-       { offsetof(xfs_sb_t, sb_inopblog),      0 },
-       { offsetof(xfs_sb_t, sb_agblklog),      0 },
-       { offsetof(xfs_sb_t, sb_rextslog),      0 },
-       { offsetof(xfs_sb_t, sb_inprogress),    0 },
-       { offsetof(xfs_sb_t, sb_imax_pct),      0 },
-       { offsetof(xfs_sb_t, sb_icount),        0 },
-       { offsetof(xfs_sb_t, sb_ifree),         0 },
-       { offsetof(xfs_sb_t, sb_fdblocks),      0 },
-       { offsetof(xfs_sb_t, sb_frextents),     0 },
-       { offsetof(xfs_sb_t, sb_uquotino),      0 },
-       { offsetof(xfs_sb_t, sb_gquotino),      0 },
-       { offsetof(xfs_sb_t, sb_qflags),        0 },
-       { offsetof(xfs_sb_t, sb_flags),         0 },
-       { offsetof(xfs_sb_t, sb_shared_vn),     0 },
-       { offsetof(xfs_sb_t, sb_inoalignmt),    0 },
-       { offsetof(xfs_sb_t, sb_unit),          0 },
-       { offsetof(xfs_sb_t, sb_width),         0 },
-       { offsetof(xfs_sb_t, sb_dirblklog),     0 },
-       { offsetof(xfs_sb_t, sb_logsectlog),    0 },
-       { offsetof(xfs_sb_t, sb_logsectsize),   0 },
-       { offsetof(xfs_sb_t, sb_logsunit),      0 },
-       { offsetof(xfs_sb_t, sb_features2),     0 },
-       { offsetof(xfs_sb_t, sb_bad_features2), 0 },
-       { offsetof(xfs_sb_t, sb_features_compat),       0 },
-       { offsetof(xfs_sb_t, sb_features_ro_compat),    0 },
-       { offsetof(xfs_sb_t, sb_features_incompat),     0 },
-       { offsetof(xfs_sb_t, sb_features_log_incompat), 0 },
-       { offsetof(xfs_sb_t, sb_crc),           0 },
-       { offsetof(xfs_sb_t, sb_pad),           0 },
-       { offsetof(xfs_sb_t, sb_pquotino),      0 },
-       { offsetof(xfs_sb_t, sb_lsn),           0 },
-       { sizeof(xfs_sb_t),                     0 }
-};
-
 /*
  * Reference counting access wrappers to the perag structures.
  * Because we never free per-ag structures, the only thing we
@@ -461,58 +398,49 @@ xfs_sb_from_disk(
        __xfs_sb_from_disk(to, from, true);
 }
 
-static inline void
+static void
 xfs_sb_quota_to_disk(
-       xfs_dsb_t       *to,
-       xfs_sb_t        *from,
-       __int64_t       *fields)
+       struct xfs_dsb  *to,
+       struct xfs_sb   *from)
 {
        __uint16_t      qflags = from->sb_qflags;
 
+       to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
+       if (xfs_sb_version_has_pquotino(from)) {
+               to->sb_qflags = cpu_to_be16(from->sb_qflags);
+               to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
+               to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
+               return;
+       }
+
        /*
-        * We need to do these manipilations only if we are working
-        * with an older version of on-disk superblock.
+        * The in-core version of sb_qflags do not have XFS_OQUOTA_*
+        * flags, whereas the on-disk version does.  So, convert incore
+        * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags.
         */
-       if (xfs_sb_version_has_pquotino(from))
-               return;
+       qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
+                       XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
 
-       if (*fields & XFS_SB_QFLAGS) {
-               /*
-                * The in-core version of sb_qflags do not have
-                * XFS_OQUOTA_* flags, whereas the on-disk version
-                * does.  So, convert incore XFS_{PG}QUOTA_* flags
-                * to on-disk XFS_OQUOTA_* flags.
-                */
-               qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
-                               XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);
-
-               if (from->sb_qflags &
-                               (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
-                       qflags |= XFS_OQUOTA_ENFD;
-               if (from->sb_qflags &
-                               (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
-                       qflags |= XFS_OQUOTA_CHKD;
-               to->sb_qflags = cpu_to_be16(qflags);
-               *fields &= ~XFS_SB_QFLAGS;
-       }
+       if (from->sb_qflags &
+                       (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
+               qflags |= XFS_OQUOTA_ENFD;
+       if (from->sb_qflags &
+                       (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
+               qflags |= XFS_OQUOTA_CHKD;
+       to->sb_qflags = cpu_to_be16(qflags);
 
        /*
-        * GQUOTINO and PQUOTINO cannot be used together in versions of
-        * superblock that do not have pquotino. from->sb_flags tells us which
-        * quota is active and should be copied to disk. If neither are active,
-        * make sure we write NULLFSINO to the sb_gquotino field as a quota
-        * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
-        * bit is set.
+        * GQUOTINO and PQUOTINO cannot be used together in versions
+        * of superblock that do not have pquotino. from->sb_flags
+        * tells us which quota is active and should be copied to
+        * disk. If neither are active, we should NULL the inode.
         *
-        * Note that we don't need to handle the sb_uquotino or sb_pquotino here
-        * as they do not require any translation. Hence the main sb field loop
-        * will write them appropriately from the in-core superblock.
+        * In all cases, the separate pquotino must remain 0 because it
+        * it beyond the "end" of the valid non-pquotino superblock.
         */
-       if ((*fields & XFS_SB_GQUOTINO) &&
-                               (from->sb_qflags & XFS_GQUOTA_ACCT))
+       if (from->sb_qflags & XFS_GQUOTA_ACCT)
                to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
-       else if ((*fields & XFS_SB_PQUOTINO) &&
-                               (from->sb_qflags & XFS_PQUOTA_ACCT))
+       else if (from->sb_qflags & XFS_PQUOTA_ACCT)
                to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
        else {
                /*
@@ -526,63 +454,78 @@ xfs_sb_quota_to_disk(
                        to->sb_gquotino = cpu_to_be64(NULLFSINO);
        }
 
-       *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
+       to->sb_pquotino = 0;
 }
 
-/*
- * Copy in core superblock to ondisk one.
- *
- * The fields argument is mask of superblock fields to copy.
- */
 void
 xfs_sb_to_disk(
-       xfs_dsb_t       *to,
-       xfs_sb_t        *from,
-       __int64_t       fields)
+       struct xfs_dsb  *to,
+       struct xfs_sb   *from)
 {
-       xfs_caddr_t     to_ptr = (xfs_caddr_t)to;
-       xfs_caddr_t     from_ptr = (xfs_caddr_t)from;
-       xfs_sb_field_t  f;
-       int             first;
-       int             size;
-
-       ASSERT(fields);
-       if (!fields)
-               return;
+       xfs_sb_quota_to_disk(to, from);
 
-       /* We should never write the crc here, it's updated in the IO path */
-       fields &= ~XFS_SB_CRC;
-
-       xfs_sb_quota_to_disk(to, from, &fields);
-       while (fields) {
-               f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
-               first = xfs_sb_info[f].offset;
-               size = xfs_sb_info[f + 1].offset - first;
-
-               ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);
-
-               if (size == 1 || xfs_sb_info[f].type == 1) {
-                       memcpy(to_ptr + first, from_ptr + first, size);
-               } else {
-                       switch (size) {
-                       case 2:
-                               *(__be16 *)(to_ptr + first) =
-                                     cpu_to_be16(*(__u16 *)(from_ptr + first));
-                               break;
-                       case 4:
-                               *(__be32 *)(to_ptr + first) =
-                                     cpu_to_be32(*(__u32 *)(from_ptr + first));
-                               break;
-                       case 8:
-                               *(__be64 *)(to_ptr + first) =
-                                     cpu_to_be64(*(__u64 *)(from_ptr + first));
-                               break;
-                       default:
-                               ASSERT(0);
-                       }
-               }
+       to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
+       to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
+       to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
+       to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
+       to->sb_rextents = cpu_to_be64(from->sb_rextents);
+       memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
+       to->sb_logstart = cpu_to_be64(from->sb_logstart);
+       to->sb_rootino = cpu_to_be64(from->sb_rootino);
+       to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
+       to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
+       to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
+       to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
+       to->sb_agcount = cpu_to_be32(from->sb_agcount);
+       to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
+       to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
+       to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
+       to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
+       to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
+       to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
+       memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
+       to->sb_blocklog = from->sb_blocklog;
+       to->sb_sectlog = from->sb_sectlog;
+       to->sb_inodelog = from->sb_inodelog;
+       to->sb_inopblog = from->sb_inopblog;
+       to->sb_agblklog = from->sb_agblklog;
+       to->sb_rextslog = from->sb_rextslog;
+       to->sb_inprogress = from->sb_inprogress;
+       to->sb_imax_pct = from->sb_imax_pct;
+       to->sb_icount = cpu_to_be64(from->sb_icount);
+       to->sb_ifree = cpu_to_be64(from->sb_ifree);
+       to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
+       to->sb_frextents = cpu_to_be64(from->sb_frextents);
 
-               fields &= ~(1LL << f);
+       to->sb_flags = from->sb_flags;
+       to->sb_shared_vn = from->sb_shared_vn;
+       to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
+       to->sb_unit = cpu_to_be32(from->sb_unit);
+       to->sb_width = cpu_to_be32(from->sb_width);
+       to->sb_dirblklog = from->sb_dirblklog;
+       to->sb_logsectlog = from->sb_logsectlog;
+       to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
+       to->sb_logsunit = cpu_to_be32(from->sb_logsunit);
+
+       /*
+        * We need to ensure that bad_features2 always matches features2.
+        * Hence we enforce that here rather than having to remember to do it
+        * everywhere else that updates features2.
+        */
+       from->sb_bad_features2 = from->sb_features2;
+       to->sb_features2 = cpu_to_be32(from->sb_features2);
+       to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);
+
+       if (xfs_sb_version_hascrc(from)) {
+               to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
+               to->sb_features_ro_compat =
+                               cpu_to_be32(from->sb_features_ro_compat);
+               to->sb_features_incompat =
+                               cpu_to_be32(from->sb_features_incompat);
+               to->sb_features_log_incompat =
+                               cpu_to_be32(from->sb_features_log_incompat);
+               to->sb_pad = 0;
+               to->sb_lsn = cpu_to_be64(from->sb_lsn);
        }
 }
 
@@ -816,42 +759,51 @@ xfs_initialize_perag_data(
 }
 
 /*
- * xfs_mod_sb() can be used to copy arbitrary changes to the
- * in-core superblock into the superblock buffer to be logged.
- * It does not provide the higher level of locking that is
- * needed to protect the in-core superblock from concurrent
- * access.
+ * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
+ * into the superblock buffer to be logged.  It does not provide the higher
+ * level of locking that is needed to protect the in-core superblock from
+ * concurrent access.
  */
 void
-xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
+xfs_log_sb(
+       struct xfs_trans        *tp)
 {
-       xfs_buf_t       *bp;
-       int             first;
-       int             last;
-       xfs_mount_t     *mp;
-       xfs_sb_field_t  f;
-
-       ASSERT(fields);
-       if (!fields)
-               return;
-       mp = tp->t_mountp;
-       bp = xfs_trans_getsb(tp, mp, 0);
-       first = sizeof(xfs_sb_t);
-       last = 0;
-
-       /* translate/copy */
+       struct xfs_mount        *mp = tp->t_mountp;
+       struct xfs_buf          *bp = xfs_trans_getsb(tp, mp, 0);
 
-       xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
+       xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
+       xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
+}
 
-       /* find modified range */
-       f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
-       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
-       last = xfs_sb_info[f + 1].offset - 1;
+/*
+ * xfs_sync_sb
+ *
+ * Sync the superblock to disk.
+ *
+ * Note that the caller is responsible for checking the frozen state of the
+ * filesystem. This procedure uses the non-blocking transaction allocator and
+ * thus will allow modifications to a frozen fs. This is required because this
+ * code can be called during the process of freezing where use of the high-level
+ * allocator would deadlock.
+ */
+int
+xfs_sync_sb(
+       struct xfs_mount        *mp,
+       bool                    wait)
+{
+       struct xfs_trans        *tp;
+       int                     error;
 
-       f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
-       ASSERT((1LL << f) & XFS_SB_MOD_BITS);
-       first = xfs_sb_info[f].offset;
+       tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
 
-       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
-       xfs_trans_log_buf(tp, bp, first, last);
+       xfs_log_sb(tp);
+       if (wait)
+               xfs_trans_set_sync(tp);
+       return xfs_trans_commit(tp, 0);
 }
index 8eb1c54..b25bb9a 100644 (file)
@@ -27,11 +27,12 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
 extern void    xfs_perag_put(struct xfs_perag *pag);
 extern int     xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
 
-extern void    xfs_sb_calc_crc(struct xfs_buf  *);
-extern void    xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern void    xfs_sb_mount_common(struct xfs_mount *, struct xfs_sb *);
-extern void    xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
-extern void    xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
+extern void    xfs_sb_calc_crc(struct xfs_buf *bp);
+extern void    xfs_log_sb(struct xfs_trans *tp);
+extern int     xfs_sync_sb(struct xfs_mount *mp, bool wait);
+extern void    xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
+extern void    xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
+extern void    xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
 extern void    xfs_sb_quota_from_disk(struct xfs_sb *sbp);
 
 #endif /* __XFS_SB_H__ */
index 82404da..8dda4b3 100644 (file)
@@ -82,7 +82,7 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
 #define        XFS_TRANS_ATTR_RM               23
 #define        XFS_TRANS_ATTR_FLAG             24
 #define        XFS_TRANS_CLEAR_AGI_BUCKET      25
-#define XFS_TRANS_QM_SBCHANGE          26
+#define XFS_TRANS_SB_CHANGE            26
 /*
  * Dummy entries since we use the transaction type to index into the
  * trans_type[] in xlog_recover_print_trans_head()
@@ -95,17 +95,15 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
 #define XFS_TRANS_QM_DQCLUSTER         32
 #define XFS_TRANS_QM_QINOCREATE                33
 #define XFS_TRANS_QM_QUOTAOFF_END      34
-#define XFS_TRANS_SB_UNIT              35
-#define XFS_TRANS_FSYNC_TS             36
-#define        XFS_TRANS_GROWFSRT_ALLOC        37
-#define        XFS_TRANS_GROWFSRT_ZERO         38
-#define        XFS_TRANS_GROWFSRT_FREE         39
-#define        XFS_TRANS_SWAPEXT               40
-#define        XFS_TRANS_SB_COUNT              41
-#define        XFS_TRANS_CHECKPOINT            42
-#define        XFS_TRANS_ICREATE               43
-#define        XFS_TRANS_CREATE_TMPFILE        44
-#define        XFS_TRANS_TYPE_MAX              44
+#define XFS_TRANS_FSYNC_TS             35
+#define        XFS_TRANS_GROWFSRT_ALLOC        36
+#define        XFS_TRANS_GROWFSRT_ZERO         37
+#define        XFS_TRANS_GROWFSRT_FREE         38
+#define        XFS_TRANS_SWAPEXT               39
+#define        XFS_TRANS_CHECKPOINT            40
+#define        XFS_TRANS_ICREATE               41
+#define        XFS_TRANS_CREATE_TMPFILE        42
+#define        XFS_TRANS_TYPE_MAX              43
 /* new transaction types need to be reflected in xfs_logprint(8) */
 
 #define XFS_TRANS_TYPES \
@@ -113,7 +111,6 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
        { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
        { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
        { XFS_TRANS_CREATE,             "CREATE" }, \
-       { XFS_TRANS_CREATE_TMPFILE,     "CREATE_TMPFILE" }, \
        { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
        { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
        { XFS_TRANS_REMOVE,             "REMOVE" }, \
@@ -134,23 +131,23 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
        { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
        { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
        { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
-       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
+       { XFS_TRANS_SB_CHANGE,          "SBCHANGE" }, \
+       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
+       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
        { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
        { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
        { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
        { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
        { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
        { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
-       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
        { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
        { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
        { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
        { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
        { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
-       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
        { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
-       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
-       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
+       { XFS_TRANS_ICREATE,            "ICREATE" }, \
+       { XFS_TRANS_CREATE_TMPFILE,     "CREATE_TMPFILE" }, \
        { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
 
 /*
index c80c523..e7e26bd 100644 (file)
@@ -178,6 +178,8 @@ xfs_symlink_local_to_remote(
        struct xfs_mount        *mp = ip->i_mount;
        char                    *buf;
 
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
+
        if (!xfs_sb_version_hascrc(&mp->m_sb)) {
                bp->b_ops = NULL;
                memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
index 6c1330f..68cb1e7 100644 (file)
@@ -715,17 +715,6 @@ xfs_calc_clear_agi_bucket_reservation(
        return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
 }
 
-/*
- * Clearing the quotaflags in the superblock.
- *     the super block for changing quota flags: sector size
- */
-STATIC uint
-xfs_calc_qm_sbchange_reservation(
-       struct xfs_mount        *mp)
-{
-       return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
-}
-
 /*
  * Adjusting quota limits.
  *    the xfs_disk_dquot_t: sizeof(struct xfs_disk_dquot)
@@ -864,9 +853,6 @@ xfs_trans_resv_calc(
         * The following transactions are logged in logical format with
         * a default log count.
         */
-       resp->tr_qm_sbchange.tr_logres = xfs_calc_qm_sbchange_reservation(mp);
-       resp->tr_qm_sbchange.tr_logcount = XFS_DEFAULT_LOG_COUNT;
-
        resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation(mp);
        resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
 
index 1097d14..2d5bdfc 100644 (file)
@@ -56,7 +56,6 @@ struct xfs_trans_resv {
        struct xfs_trans_res    tr_growrtalloc; /* grow realtime allocations */
        struct xfs_trans_res    tr_growrtzero;  /* grow realtime zeroing */
        struct xfs_trans_res    tr_growrtfree;  /* grow realtime freeing */
-       struct xfs_trans_res    tr_qm_sbchange; /* change quota flags */
        struct xfs_trans_res    tr_qm_setqlim;  /* adjust quota limits */
        struct xfs_trans_res    tr_qm_dqalloc;  /* allocate quota on disk */
        struct xfs_trans_res    tr_qm_quotaoff; /* turn quota off */
similarity index 100%
rename from fs/xfs/xfs_types.h
rename to fs/xfs/libxfs/xfs_types.h
index 18e2f3b..3a9b7a1 100644 (file)
@@ -135,30 +135,22 @@ xfs_setfilesize_trans_alloc(
  */
 STATIC int
 xfs_setfilesize(
-       struct xfs_ioend        *ioend)
+       struct xfs_inode        *ip,
+       struct xfs_trans        *tp,
+       xfs_off_t               offset,
+       size_t                  size)
 {
-       struct xfs_inode        *ip = XFS_I(ioend->io_inode);
-       struct xfs_trans        *tp = ioend->io_append_trans;
        xfs_fsize_t             isize;
 
-       /*
-        * The transaction may have been allocated in the I/O submission thread,
-        * thus we need to mark ourselves as beeing in a transaction manually.
-        * Similarly for freeze protection.
-        */
-       current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
-       rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
-                          0, 1, _THIS_IP_);
-
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-       isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
+       isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp, 0);
                return 0;
        }
 
-       trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
+       trace_xfs_setfilesize(ip, offset, size);
 
        ip->i_d.di_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -167,6 +159,25 @@ xfs_setfilesize(
        return xfs_trans_commit(tp, 0);
 }
 
+STATIC int
+xfs_setfilesize_ioend(
+       struct xfs_ioend        *ioend)
+{
+       struct xfs_inode        *ip = XFS_I(ioend->io_inode);
+       struct xfs_trans        *tp = ioend->io_append_trans;
+
+       /*
+        * The transaction may have been allocated in the I/O submission thread,
+        * thus we need to mark ourselves as being in a transaction manually.
+        * Similarly for freeze protection.
+        */
+       current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
+       rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+                          0, 1, _THIS_IP_);
+
+       return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+}
+
 /*
  * Schedule IO completion handling on the final put of an ioend.
  *
@@ -182,8 +193,7 @@ xfs_finish_ioend(
 
                if (ioend->io_type == XFS_IO_UNWRITTEN)
                        queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
-               else if (ioend->io_append_trans ||
-                        (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
+               else if (ioend->io_append_trans)
                        queue_work(mp->m_data_workqueue, &ioend->io_work);
                else
                        xfs_destroy_ioend(ioend);
@@ -215,22 +225,8 @@ xfs_end_io(
        if (ioend->io_type == XFS_IO_UNWRITTEN) {
                error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
                                                  ioend->io_size);
-       } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
-               /*
-                * For direct I/O we do not know if we need to allocate blocks
-                * or not so we can't preallocate an append transaction as that
-                * results in nested reservations and log space deadlocks. Hence
-                * allocate the transaction here. While this is sub-optimal and
-                * can block IO completion for some time, we're stuck with doing
-                * it this way until we can pass the ioend to the direct IO
-                * allocation callbacks and avoid nesting that way.
-                */
-               error = xfs_setfilesize_trans_alloc(ioend);
-               if (error)
-                       goto done;
-               error = xfs_setfilesize(ioend);
        } else if (ioend->io_append_trans) {
-               error = xfs_setfilesize(ioend);
+               error = xfs_setfilesize_ioend(ioend);
        } else {
                ASSERT(!xfs_ioend_is_append(ioend));
        }
@@ -241,17 +237,6 @@ done:
        xfs_destroy_ioend(ioend);
 }
 
-/*
- * Call IO completion handling in caller context on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend_sync(
-       struct xfs_ioend        *ioend)
-{
-       if (atomic_dec_and_test(&ioend->io_remaining))
-               xfs_end_io(&ioend->io_work);
-}
-
 /*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
@@ -273,7 +258,6 @@ xfs_alloc_ioend(
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
-       ioend->io_isdirect = 0;
        ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
@@ -1459,11 +1443,7 @@ xfs_get_blocks_direct(
  *
  * If the private argument is non-NULL __xfs_get_blocks signals us that we
  * need to issue a transaction to convert the range from unwritten to written
- * extents.  In case this is regular synchronous I/O we just call xfs_end_io
- * to do this and we are done.  But in case this was a successful AIO
- * request this handler is called from interrupt context, from which we
- * can't start transactions.  In that case offload the I/O completion to
- * the workqueues we also use for buffered I/O completion.
+ * extents.
  */
 STATIC void
 xfs_end_io_direct_write(
@@ -1472,7 +1452,12 @@ xfs_end_io_direct_write(
        ssize_t                 size,
        void                    *private)
 {
-       struct xfs_ioend        *ioend = iocb->private;
+       struct inode            *inode = file_inode(iocb->ki_filp);
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return;
 
        /*
         * While the generic direct I/O code updates the inode size, it does
@@ -1480,22 +1465,33 @@ xfs_end_io_direct_write(
         * end_io handler thinks the on-disk size is outside the in-core
         * size.  To prevent this just update it a little bit earlier here.
         */
-       if (offset + size > i_size_read(ioend->io_inode))
-               i_size_write(ioend->io_inode, offset + size);
+       if (offset + size > i_size_read(inode))
+               i_size_write(inode, offset + size);
 
        /*
-        * blockdev_direct_IO can return an error even after the I/O
-        * completion handler was called.  Thus we need to protect
-        * against double-freeing.
+        * For direct I/O we do not know if we need to allocate blocks or not,
+        * so we can't preallocate an append transaction, as that results in
+        * nested reservations and log space deadlocks. Hence allocate the
+        * transaction here. While this is sub-optimal and can block IO
+        * completion for some time, we're stuck with doing it this way until
+        * we can pass the ioend to the direct IO allocation callbacks and
+        * avoid nesting that way.
         */
-       iocb->private = NULL;
-
-       ioend->io_offset = offset;
-       ioend->io_size = size;
-       if (private && size > 0)
-               ioend->io_type = XFS_IO_UNWRITTEN;
+       if (private && size > 0) {
+               xfs_iomap_write_unwritten(ip, offset, size);
+       } else if (offset + size > ip->i_d.di_size) {
+               struct xfs_trans        *tp;
+               int                     error;
+
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+               if (error) {
+                       xfs_trans_cancel(tp, 0);
+                       return;
+               }
 
-       xfs_finish_ioend_sync(ioend);
+               xfs_setfilesize(ip, tp, offset, size);
+       }
 }
 
 STATIC ssize_t
@@ -1507,39 +1503,16 @@ xfs_vm_direct_IO(
 {
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
-       struct xfs_ioend        *ioend = NULL;
-       ssize_t                 ret;
 
        if (rw & WRITE) {
-               size_t size = iov_iter_count(iter);
-
-               /*
-                * We cannot preallocate a size update transaction here as we
-                * don't know whether allocation is necessary or not. Hence we
-                * can only tell IO completion that one is necessary if we are
-                * not doing unwritten extent conversion.
-                */
-               iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
-               if (offset + size > XFS_I(inode)->i_d.di_size)
-                       ioend->io_isdirect = 1;
-
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+               return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
                                            offset, xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL,
                                            DIO_ASYNC_EXTEND);
-               if (ret != -EIOCBQUEUED && iocb->private)
-                       goto out_destroy_ioend;
-       } else {
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
-                                           offset, xfs_get_blocks_direct,
-                                           NULL, NULL, 0);
        }
-
-       return ret;
-
-out_destroy_ioend:
-       xfs_destroy_ioend(ioend);
-       return ret;
+       return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
+                                   offset, xfs_get_blocks_direct,
+                                   NULL, NULL, 0);
 }
 
 /*
index f94dd45..ac644e0 100644 (file)
@@ -24,14 +24,12 @@ extern mempool_t *xfs_ioend_pool;
  * Types of I/O for bmap clustering and I/O completion tracking.
  */
 enum {
-       XFS_IO_DIRECT = 0,      /* special case for direct I/O ioends */
        XFS_IO_DELALLOC,        /* covers delalloc region */
        XFS_IO_UNWRITTEN,       /* covers allocated but uninitialized data */
        XFS_IO_OVERWRITE,       /* covers already allocated extent */
 };
 
 #define XFS_IO_TYPES \
-       { 0,                    "" }, \
        { XFS_IO_DELALLOC,              "delalloc" }, \
        { XFS_IO_UNWRITTEN,             "unwritten" }, \
        { XFS_IO_OVERWRITE,             "overwrite" }
@@ -45,7 +43,6 @@ typedef struct xfs_ioend {
        unsigned int            io_type;        /* delalloc / unwritten */
        int                     io_error;       /* I/O error code */
        atomic_t                io_remaining;   /* hold count */
-       unsigned int            io_isdirect : 1;/* direct I/O */
        struct inode            *io_inode;      /* file being written to */
        struct buffer_head      *io_buffer_head;/* buffer linked list head */
        struct buffer_head      *io_buffer_tail;/* buffer linked list tail */
index 2fdb72d..736429a 100644 (file)
@@ -26,43 +26,8 @@ struct xfs_ifork;
 struct xfs_inode;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_bmalloca;
 
-/*
- * Argument structure for xfs_bmap_alloc.
- */
-struct xfs_bmalloca {
-       xfs_fsblock_t           *firstblock; /* i/o first block allocated */
-       struct xfs_bmap_free    *flist; /* bmap freelist */
-       struct xfs_trans        *tp;    /* transaction pointer */
-       struct xfs_inode        *ip;    /* incore inode pointer */
-       struct xfs_bmbt_irec    prev;   /* extent before the new one */
-       struct xfs_bmbt_irec    got;    /* extent after, or delayed */
-
-       xfs_fileoff_t           offset; /* offset in file filling in */
-       xfs_extlen_t            length; /* i/o length asked/allocated */
-       xfs_fsblock_t           blkno;  /* starting block of new extent */
-
-       struct xfs_btree_cur    *cur;   /* btree cursor */
-       xfs_extnum_t            idx;    /* current extent index */
-       int                     nallocs;/* number of extents alloc'd */
-       int                     logflags;/* flags for transaction logging */
-
-       xfs_extlen_t            total;  /* total blocks needed for xaction */
-       xfs_extlen_t            minlen; /* minimum allocation size (blocks) */
-       xfs_extlen_t            minleft; /* amount must be left after alloc */
-       bool                    eof;    /* set if allocating past last extent */
-       bool                    wasdel; /* replacing a delayed allocation */
-       bool                    userdata;/* set if is user data */
-       bool                    aeof;   /* allocated space at eof */
-       bool                    conv;   /* overwriting unwritten extents */
-       int                     flags;
-       struct completion       *done;
-       struct work_struct      work;
-       int                     result;
-};
-
-int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
-                       int *committed);
 int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
 int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
                     int whichfork, int *eof);
index 3f9bd58..507d96a 100644 (file)
@@ -319,6 +319,10 @@ xfs_buf_item_format(
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
+       ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
+              (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
+               && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
+
 
        /*
         * If it is an inode buffer, transfer the in-memory state to the
@@ -535,7 +539,7 @@ xfs_buf_item_push(
        if ((bp->b_flags & XBF_WRITE_FAIL) &&
            ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
                xfs_warn(bp->b_target->bt_mount,
-"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+"Detected failing async write on buffer block 0x%llx. Retrying async write.",
                         (long long)bp->b_bn);
        }
 
index c24c67e..2f536f3 100644 (file)
@@ -86,7 +86,7 @@ static inline void xfs_dqflock(xfs_dquot_t *dqp)
        wait_for_completion(&dqp->q_flush);
 }
 
-static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+static inline bool xfs_dqflock_nowait(xfs_dquot_t *dqp)
 {
        return try_wait_for_completion(&dqp->q_flush);
 }
index ac7f1e8..f2d05a1 100644 (file)
@@ -127,6 +127,42 @@ xfs_iozero(
        return (-status);
 }
 
+int
+xfs_update_prealloc_flags(
+       struct xfs_inode        *ip,
+       enum xfs_prealloc_flags flags)
+{
+       struct xfs_trans        *tp;
+       int                     error;
+
+       tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       if (!(flags & XFS_PREALLOC_INVISIBLE)) {
+               ip->i_d.di_mode &= ~S_ISUID;
+               if (ip->i_d.di_mode & S_IXGRP)
+                       ip->i_d.di_mode &= ~S_ISGID;
+               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       }
+
+       if (flags & XFS_PREALLOC_SET)
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+       if (flags & XFS_PREALLOC_CLEAR)
+               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       if (flags & XFS_PREALLOC_SYNC)
+               xfs_trans_set_sync(tp);
+       return xfs_trans_commit(tp, 0);
+}
+
 /*
  * Fsync operations on directories are much simpler than on regular files,
  * as there is no file data to flush, and thus also no need for explicit
@@ -784,8 +820,8 @@ xfs_file_fallocate(
 {
        struct inode            *inode = file_inode(file);
        struct xfs_inode        *ip = XFS_I(inode);
-       struct xfs_trans        *tp;
        long                    error;
+       enum xfs_prealloc_flags flags = 0;
        loff_t                  new_size = 0;
 
        if (!S_ISREG(inode->i_mode))
@@ -822,6 +858,8 @@ xfs_file_fallocate(
                if (error)
                        goto out_unlock;
        } else {
+               flags |= XFS_PREALLOC_SET;
+
                if (!(mode & FALLOC_FL_KEEP_SIZE) &&
                    offset + len > i_size_read(inode)) {
                        new_size = offset + len;
@@ -839,28 +877,10 @@ xfs_file_fallocate(
                        goto out_unlock;
        }
 
-       tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
-       error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               goto out_unlock;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       ip->i_d.di_mode &= ~S_ISUID;
-       if (ip->i_d.di_mode & S_IXGRP)
-               ip->i_d.di_mode &= ~S_ISGID;
-
-       if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
-               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-
-       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
        if (file->f_flags & O_DSYNC)
-               xfs_trans_set_sync(tp);
-       error = xfs_trans_commit(tp, 0);
+               flags |= XFS_PREALLOC_SYNC;
+
+       error = xfs_update_prealloc_flags(ip, flags);
        if (error)
                goto out_unlock;
 
index fdc6422..fba6532 100644 (file)
@@ -488,6 +488,7 @@ xfs_growfs_data_private(
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
        if (dpct)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
+       xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);
        if (error)
                return error;
@@ -541,7 +542,7 @@ xfs_growfs_data_private(
                        saved_error = error;
                        continue;
                }
-               xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
+               xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
 
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
@@ -756,37 +757,6 @@ out:
        return 0;
 }
 
-/*
- * Dump a transaction into the log that contains no real change. This is needed
- * to be able to make the log dirty or stamp the current tail LSN into the log
- * during the covering operation.
- *
- * We cannot use an inode here for this - that will push dirty state back up
- * into the VFS and then periodic inode flushing will prevent log covering from
- * making progress. Hence we log a field in the superblock instead and use a
- * synchronous transaction to ensure the superblock is immediately unpinned
- * and can be written back.
- */
-int
-xfs_fs_log_dummy(
-       xfs_mount_t     *mp)
-{
-       xfs_trans_t     *tp;
-       int             error;
-
-       tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       /* log the UUID because it is an unchanging field */
-       xfs_mod_sb(tp, XFS_SB_UUID);
-       xfs_trans_set_sync(tp);
-       return xfs_trans_commit(tp, 0);
-}
-
 int
 xfs_fs_goingdown(
        xfs_mount_t     *mp,
index 41f804e..daafa1f 100644 (file)
@@ -1995,6 +1995,7 @@ xfs_iunlink(
        agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
        offset = offsetof(xfs_agi_t, agi_unlinked) +
                (sizeof(xfs_agino_t) * bucket_index);
+       xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
        xfs_trans_log_buf(tp, agibp, offset,
                          (offset + sizeof(xfs_agino_t) - 1));
        return 0;
@@ -2086,6 +2087,7 @@ xfs_iunlink_remove(
                agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
                offset = offsetof(xfs_agi_t, agi_unlinked) +
                        (sizeof(xfs_agino_t) * bucket_index);
+               xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
                xfs_trans_log_buf(tp, agibp, offset,
                                  (offset + sizeof(xfs_agino_t) - 1));
        } else {
@@ -2655,6 +2657,124 @@ xfs_sort_for_rename(
        }
 }
 
+/*
+ * xfs_cross_rename()
+ *
+ * responsible for handling RENAME_EXCHANGE flag in renameat2() system call
+ */
+STATIC int
+xfs_cross_rename(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *dp1,
+       struct xfs_name         *name1,
+       struct xfs_inode        *ip1,
+       struct xfs_inode        *dp2,
+       struct xfs_name         *name2,
+       struct xfs_inode        *ip2,
+       struct xfs_bmap_free    *free_list,
+       xfs_fsblock_t           *first_block,
+       int                     spaceres)
+{
+       int             error = 0;
+       int             ip1_flags = 0;
+       int             ip2_flags = 0;
+       int             dp2_flags = 0;
+
+       /* Swap inode number for dirent in first parent */
+       error = xfs_dir_replace(tp, dp1, name1,
+                               ip2->i_ino,
+                               first_block, free_list, spaceres);
+       if (error)
+               goto out;
+
+       /* Swap inode number for dirent in second parent */
+       error = xfs_dir_replace(tp, dp2, name2,
+                               ip1->i_ino,
+                               first_block, free_list, spaceres);
+       if (error)
+               goto out;
+
+       /*
+        * If we're renaming one or more directories across different parents,
+        * update the respective ".." entries (and link counts) to match the new
+        * parents.
+        */
+       if (dp1 != dp2) {
+               dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
+               if (S_ISDIR(ip2->i_d.di_mode)) {
+                       error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
+                                               dp1->i_ino, first_block,
+                                               free_list, spaceres);
+                       if (error)
+                               goto out;
+
+                       /* transfer ip2 ".." reference to dp1 */
+                       if (!S_ISDIR(ip1->i_d.di_mode)) {
+                               error = xfs_droplink(tp, dp2);
+                               if (error)
+                                       goto out;
+                               error = xfs_bumplink(tp, dp1);
+                               if (error)
+                                       goto out;
+                       }
+
+                       /*
+                        * Although ip1 isn't changed here, userspace needs
+                        * to be notified about the change, so that
+                        * applications relying on it (like backup ones)
+                        * will properly pick up the change
+                        */
+                       ip1_flags |= XFS_ICHGTIME_CHG;
+                       ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+               }
+
+               if (S_ISDIR(ip1->i_d.di_mode)) {
+                       error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
+                                               dp2->i_ino, first_block,
+                                               free_list, spaceres);
+                       if (error)
+                               goto out;
+
+                       /* transfer ip1 ".." reference to dp2 */
+                       if (!S_ISDIR(ip2->i_d.di_mode)) {
+                               error = xfs_droplink(tp, dp1);
+                               if (error)
+                                       goto out;
+                               error = xfs_bumplink(tp, dp2);
+                               if (error)
+                                       goto out;
+                       }
+
+                       /*
+                        * Although ip2 isn't changed here, userspace needs
+                        * to be notified about the change, so that
+                        * applications relying on it (like backup ones)
+                        * will properly pick up the change
+                        */
+                       ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+                       ip2_flags |= XFS_ICHGTIME_CHG;
+               }
+       }
+
+       if (ip1_flags) {
+               xfs_trans_ichgtime(tp, ip1, ip1_flags);
+               xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
+       }
+       if (ip2_flags) {
+               xfs_trans_ichgtime(tp, ip2, ip2_flags);
+               xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
+       }
+       if (dp2_flags) {
+               xfs_trans_ichgtime(tp, dp2, dp2_flags);
+               xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
+       }
+       xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
+out:
+       return error;
+}
+
 /*
  * xfs_rename
  */
@@ -2665,7 +2785,8 @@ xfs_rename(
        xfs_inode_t     *src_ip,
        xfs_inode_t     *target_dp,
        struct xfs_name *target_name,
-       xfs_inode_t     *target_ip)
+       xfs_inode_t     *target_ip,
+       unsigned int    flags)
 {
        xfs_trans_t     *tp = NULL;
        xfs_mount_t     *mp = src_dp->i_mount;
@@ -2742,6 +2863,18 @@ xfs_rename(
                goto error_return;
        }
 
+       /*
+        * Handle RENAME_EXCHANGE flags
+        */
+       if (flags & RENAME_EXCHANGE) {
+               error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
+                                        target_dp, target_name, target_ip,
+                                        &free_list, &first_block, spaceres);
+               if (error)
+                       goto abort_return;
+               goto finish_rename;
+       }
+
        /*
         * Set up the target.
         */
@@ -2881,6 +3014,7 @@ xfs_rename(
        if (new_parent)
                xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
 
+finish_rename:
        /*
         * If this is a synchronous mount, make sure that the
         * rename transaction goes to disk before returning to
index 4ed2ba9..86cd6b3 100644 (file)
@@ -338,7 +338,7 @@ int         xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
 int            xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
                           struct xfs_inode *src_ip, struct xfs_inode *target_dp,
                           struct xfs_name *target_name,
-                          struct xfs_inode *target_ip);
+                          struct xfs_inode *target_ip, unsigned int flags);
 
 void           xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
@@ -377,6 +377,15 @@ int                xfs_droplink(struct xfs_trans *, struct xfs_inode *);
 int            xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
 
 /* from xfs_file.c */
+enum xfs_prealloc_flags {
+       XFS_PREALLOC_SET        = (1 << 1),
+       XFS_PREALLOC_CLEAR      = (1 << 2),
+       XFS_PREALLOC_SYNC       = (1 << 3),
+       XFS_PREALLOC_INVISIBLE  = (1 << 4),
+};
+
+int            xfs_update_prealloc_flags(struct xfs_inode *,
+                       enum xfs_prealloc_flags);
 int            xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
 int            xfs_iozero(struct xfs_inode *, loff_t, size_t);
 
index a183198..f7afb86 100644 (file)
@@ -606,11 +606,8 @@ xfs_ioc_space(
        unsigned int            cmd,
        xfs_flock64_t           *bf)
 {
-       struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_trans        *tp;
        struct iattr            iattr;
-       bool                    setprealloc = false;
-       bool                    clrprealloc = false;
+       enum xfs_prealloc_flags flags = 0;
        int                     error;
 
        /*
@@ -630,6 +627,11 @@ xfs_ioc_space(
        if (!S_ISREG(inode->i_mode))
                return -EINVAL;
 
+       if (filp->f_flags & O_DSYNC)
+               flags |= XFS_PREALLOC_SYNC;
+       if (ioflags & XFS_IO_INVIS)
+               flags |= XFS_PREALLOC_INVISIBLE;
+
        error = mnt_want_write_file(filp);
        if (error)
                return error;
@@ -673,25 +675,23 @@ xfs_ioc_space(
        }
 
        if (bf->l_start < 0 ||
-           bf->l_start > mp->m_super->s_maxbytes ||
+           bf->l_start > inode->i_sb->s_maxbytes ||
            bf->l_start + bf->l_len < 0 ||
-           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+           bf->l_start + bf->l_len >= inode->i_sb->s_maxbytes) {
                error = -EINVAL;
                goto out_unlock;
        }
 
        switch (cmd) {
        case XFS_IOC_ZERO_RANGE:
+               flags |= XFS_PREALLOC_SET;
                error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
-               if (!error)
-                       setprealloc = true;
                break;
        case XFS_IOC_RESVSP:
        case XFS_IOC_RESVSP64:
+               flags |= XFS_PREALLOC_SET;
                error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
                                                XFS_BMAPI_PREALLOC);
-               if (!error)
-                       setprealloc = true;
                break;
        case XFS_IOC_UNRESVSP:
        case XFS_IOC_UNRESVSP64:
@@ -701,6 +701,7 @@ xfs_ioc_space(
        case XFS_IOC_ALLOCSP64:
        case XFS_IOC_FREESP:
        case XFS_IOC_FREESP64:
+               flags |= XFS_PREALLOC_CLEAR;
                if (bf->l_start > XFS_ISIZE(ip)) {
                        error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
                                        bf->l_start - XFS_ISIZE(ip), 0);
@@ -712,8 +713,6 @@ xfs_ioc_space(
                iattr.ia_size = bf->l_start;
 
                error = xfs_setattr_size(ip, &iattr);
-               if (!error)
-                       clrprealloc = true;
                break;
        default:
                ASSERT(0);
@@ -723,32 +722,7 @@ xfs_ioc_space(
        if (error)
                goto out_unlock;
 
-       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               goto out_unlock;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       if (!(ioflags & XFS_IO_INVIS)) {
-               ip->i_d.di_mode &= ~S_ISUID;
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
-               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       }
-
-       if (setprealloc)
-               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-       else if (clrprealloc)
-               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       if (filp->f_flags & O_DSYNC)
-               xfs_trans_set_sync(tp);
-       error = xfs_trans_commit(tp, 0);
+       error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
@@ -1013,20 +987,182 @@ xfs_diflags_to_linux(
                inode->i_flags &= ~S_NOATIME;
 }
 
-#define FSX_PROJID     1
-#define FSX_EXTSIZE    2
-#define FSX_XFLAGS     4
-#define FSX_NONBLOCK   8
+static int
+xfs_ioctl_setattr_xflags(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       struct fsxattr          *fa)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       /* Can't change realtime flag if any extents are allocated. */
+       if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+           XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & XFS_XFLAG_REALTIME))
+               return -EINVAL;
+
+       /* If realtime flag is set then must have realtime device */
+       if (fa->fsx_xflags & XFS_XFLAG_REALTIME) {
+               if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
+                   (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
+                       return -EINVAL;
+       }
+
+       /*
+        * Can't modify an immutable/append-only file unless
+        * we have appropriate permission.
+        */
+       if (((ip->i_d.di_flags & (XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND)) ||
+            (fa->fsx_xflags & (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
+           !capable(CAP_LINUX_IMMUTABLE))
+               return -EPERM;
+
+       xfs_set_diflags(ip, fa->fsx_xflags);
+       xfs_diflags_to_linux(ip);
+       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       XFS_STATS_INC(xs_ig_attrchg);
+       return 0;
+}
+
+/*
+ * Set up the transaction structure for the setattr operation, checking that we
+ * have permission to do so. On success, return a clean transaction and the
+ * inode locked exclusively ready for further operation specific checks. On
+ * failure, return an error without modifying or locking the inode.
+ */
+static struct xfs_trans *
+xfs_ioctl_setattr_get_trans(
+       struct xfs_inode        *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (mp->m_flags & XFS_MOUNT_RDONLY)
+               return ERR_PTR(-EROFS);
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return ERR_PTR(-EIO);
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+       if (error)
+               goto out_cancel;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       /*
+        * CAP_FOWNER overrides the following restrictions:
+        *
+        * The user ID of the calling process must be equal to the file owner
+        * ID, except in cases where the CAP_FSETID capability is applicable.
+        */
+       if (!inode_owner_or_capable(VFS_I(ip))) {
+               error = -EPERM;
+               goto out_cancel;
+       }
+
+       if (mp->m_flags & XFS_MOUNT_WSYNC)
+               xfs_trans_set_sync(tp);
+
+       return tp;
+
+out_cancel:
+       xfs_trans_cancel(tp, 0);
+       return ERR_PTR(error);
+}
+
+/*
+ * extent size hint validation is somewhat cumbersome. Rules are:
+ *
+ * 1. extent size hint is only valid for directories and regular files
+ * 2. XFS_XFLAG_EXTSIZE is only valid for regular files
+ * 3. XFS_XFLAG_EXTSZINHERIT is only valid for directories.
+ * 4. can only be changed on regular files if no extents are allocated
+ * 5. can be changed on directories at any time
+ * 6. extsize hint of 0 turns off hints, clears inode flags.
+ * 7. Extent size must be a multiple of the appropriate block size.
+ * 8. for non-realtime files, the extent size hint must be limited
+ *    to half the AG size to avoid alignment extending the extent beyond the
+ *    limits of the AG.
+ */
+static int
+xfs_ioctl_setattr_check_extsize(
+       struct xfs_inode        *ip,
+       struct fsxattr          *fa)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       if ((fa->fsx_xflags & XFS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
+               return -EINVAL;
+
+       if ((fa->fsx_xflags & XFS_XFLAG_EXTSZINHERIT) &&
+           !S_ISDIR(ip->i_d.di_mode))
+               return -EINVAL;
+
+       if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
+           ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
+               return -EINVAL;
+
+       if (fa->fsx_extsize != 0) {
+               xfs_extlen_t    size;
+               xfs_fsblock_t   extsize_fsb;
+
+               extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+               if (extsize_fsb > MAXEXTLEN)
+                       return -EINVAL;
+
+               if (XFS_IS_REALTIME_INODE(ip) ||
+                   (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
+                       size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
+               } else {
+                       size = mp->m_sb.sb_blocksize;
+                       if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
+                               return -EINVAL;
+               }
+
+               if (fa->fsx_extsize % size)
+                       return -EINVAL;
+       } else
+               fa->fsx_xflags &= ~(XFS_XFLAG_EXTSIZE | XFS_XFLAG_EXTSZINHERIT);
+
+       return 0;
+}
+
+static int
+xfs_ioctl_setattr_check_projid(
+       struct xfs_inode        *ip,
+       struct fsxattr          *fa)
+{
+       /* Disallow 32bit project ids if projid32bit feature is not enabled. */
+       if (fa->fsx_projid > (__uint16_t)-1 &&
+           !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
+               return -EINVAL;
+
+       /*
+        * Project Quota ID state is only allowed to change from within the init
+        * namespace. Enforce that restriction only if we are trying to change
+        * the quota ID state. Everything else is allowed in user namespaces.
+        */
+       if (current_user_ns() == &init_user_ns)
+               return 0;
+
+       if (xfs_get_projid(ip) != fa->fsx_projid)
+               return -EINVAL;
+       if ((fa->fsx_xflags & XFS_XFLAG_PROJINHERIT) !=
+           (ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
+               return -EINVAL;
+
+       return 0;
+}
 
 STATIC int
 xfs_ioctl_setattr(
        xfs_inode_t             *ip,
-       struct fsxattr          *fa,
-       int                     mask)
+       struct fsxattr          *fa)
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
-       unsigned int            lock_flags = 0;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_dquot        *olddquot = NULL;
@@ -1034,17 +1170,9 @@ xfs_ioctl_setattr(
 
        trace_xfs_ioctl_setattr(ip);
 
-       if (mp->m_flags & XFS_MOUNT_RDONLY)
-               return -EROFS;
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
-       /*
-        * Disallow 32bit project ids when projid32bit feature is not enabled.
-        */
-       if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) &&
-                       !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
-               return -EINVAL;
+       code = xfs_ioctl_setattr_check_projid(ip, fa);
+       if (code)
+               return code;
 
        /*
         * If disk quotas is on, we make sure that the dquots do exist on disk,
@@ -1054,7 +1182,7 @@ xfs_ioctl_setattr(
         * If the IDs do change before we take the ilock, we're covered
         * because the i_*dquot fields will get updated anyway.
         */
-       if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
+       if (XFS_IS_QUOTA_ON(mp)) {
                code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
                                         ip->i_d.di_gid, fa->fsx_projid,
                                         XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
@@ -1062,175 +1190,49 @@ xfs_ioctl_setattr(
                        return code;
        }
 
-       /*
-        * For the other attributes, we acquire the inode lock and
-        * first do an error checking pass.
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-       code = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
-       if (code)
-               goto error_return;
-
-       lock_flags = XFS_ILOCK_EXCL;
-       xfs_ilock(ip, lock_flags);
-
-       /*
-        * CAP_FOWNER overrides the following restrictions:
-        *
-        * The user ID of the calling process must be equal
-        * to the file owner ID, except in cases where the
-        * CAP_FSETID capability is applicable.
-        */
-       if (!inode_owner_or_capable(VFS_I(ip))) {
-               code = -EPERM;
-               goto error_return;
-       }
-
-       /*
-        * Do a quota reservation only if projid is actually going to change.
-        * Only allow changing of projid from init_user_ns since it is a
-        * non user namespace aware identifier.
-        */
-       if (mask & FSX_PROJID) {
-               if (current_user_ns() != &init_user_ns) {
-                       code = -EINVAL;
-                       goto error_return;
-               }
-
-               if (XFS_IS_QUOTA_RUNNING(mp) &&
-                   XFS_IS_PQUOTA_ON(mp) &&
-                   xfs_get_projid(ip) != fa->fsx_projid) {
-                       ASSERT(tp);
-                       code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
-                                               pdqp, capable(CAP_FOWNER) ?
-                                               XFS_QMOPT_FORCE_RES : 0);
-                       if (code)       /* out of quota */
-                               goto error_return;
-               }
+       tp = xfs_ioctl_setattr_get_trans(ip);
+       if (IS_ERR(tp)) {
+               code = PTR_ERR(tp);
+               goto error_free_dquots;
        }
 
-       if (mask & FSX_EXTSIZE) {
-               /*
-                * Can't change extent size if any extents are allocated.
-                */
-               if (ip->i_d.di_nextents &&
-                   ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
-                    fa->fsx_extsize)) {
-                       code = -EINVAL; /* EFBIG? */
-                       goto error_return;
-               }
 
-               /*
-                * Extent size must be a multiple of the appropriate block
-                * size, if set at all. It must also be smaller than the
-                * maximum extent size supported by the filesystem.
-                *
-                * Also, for non-realtime files, limit the extent size hint to
-                * half the size of the AGs in the filesystem so alignment
-                * doesn't result in extents larger than an AG.
-                */
-               if (fa->fsx_extsize != 0) {
-                       xfs_extlen_t    size;
-                       xfs_fsblock_t   extsize_fsb;
-
-                       extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
-                       if (extsize_fsb > MAXEXTLEN) {
-                               code = -EINVAL;
-                               goto error_return;
-                       }
-
-                       if (XFS_IS_REALTIME_INODE(ip) ||
-                           ((mask & FSX_XFLAGS) &&
-                           (fa->fsx_xflags & XFS_XFLAG_REALTIME))) {
-                               size = mp->m_sb.sb_rextsize <<
-                                      mp->m_sb.sb_blocklog;
-                       } else {
-                               size = mp->m_sb.sb_blocksize;
-                               if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
-                                       code = -EINVAL;
-                                       goto error_return;
-                               }
-                       }
-
-                       if (fa->fsx_extsize % size) {
-                               code = -EINVAL;
-                               goto error_return;
-                       }
-               }
+       if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
+           xfs_get_projid(ip) != fa->fsx_projid) {
+               code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
+                               capable(CAP_FOWNER) ?  XFS_QMOPT_FORCE_RES : 0);
+               if (code)       /* out of quota */
+                       goto error_trans_cancel;
        }
 
+       code = xfs_ioctl_setattr_check_extsize(ip, fa);
+       if (code)
+               goto error_trans_cancel;
 
-       if (mask & FSX_XFLAGS) {
-               /*
-                * Can't change realtime flag if any extents are allocated.
-                */
-               if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
-                   (XFS_IS_REALTIME_INODE(ip)) !=
-                   (fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
-                       code = -EINVAL; /* EFBIG? */
-                       goto error_return;
-               }
-
-               /*
-                * If realtime flag is set then must have realtime data.
-                */
-               if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) {
-                       if ((mp->m_sb.sb_rblocks == 0) ||
-                           (mp->m_sb.sb_rextsize == 0) ||
-                           (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) {
-                               code = -EINVAL;
-                               goto error_return;
-                       }
-               }
-
-               /*
-                * Can't modify an immutable/append-only file unless
-                * we have appropriate permission.
-                */
-               if ((ip->i_d.di_flags &
-                               (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) ||
-                    (fa->fsx_xflags &
-                               (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) &&
-                   !capable(CAP_LINUX_IMMUTABLE)) {
-                       code = -EPERM;
-                       goto error_return;
-               }
-       }
-
-       xfs_trans_ijoin(tp, ip, 0);
+       code = xfs_ioctl_setattr_xflags(tp, ip, fa);
+       if (code)
+               goto error_trans_cancel;
 
        /*
-        * Change file ownership.  Must be the owner or privileged.
+        * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
+        * overrides the following restrictions:
+        *
+        * The set-user-ID and set-group-ID bits of a file will be cleared upon
+        * successful return from chown()
         */
-       if (mask & FSX_PROJID) {
-               /*
-                * CAP_FSETID overrides the following restrictions:
-                *
-                * The set-user-ID and set-group-ID bits of a file will be
-                * cleared upon successful return from chown()
-                */
-               if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
-                   !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
-                       ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
-
-               /*
-                * Change the ownerships and register quota modifications
-                * in the transaction.
-                */
-               if (xfs_get_projid(ip) != fa->fsx_projid) {
-                       if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
-                               olddquot = xfs_qm_vop_chown(tp, ip,
-                                                       &ip->i_pdquot, pdqp);
-                       }
-                       ASSERT(ip->i_d.di_version > 1);
-                       xfs_set_projid(ip, fa->fsx_projid);
-               }
 
-       }
+       if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+           !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
+               ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
 
-       if (mask & FSX_XFLAGS) {
-               xfs_set_diflags(ip, fa->fsx_xflags);
-               xfs_diflags_to_linux(ip);
+       /* Change the ownerships and register project quota modifications */
+       if (xfs_get_projid(ip) != fa->fsx_projid) {
+               if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
+                       olddquot = xfs_qm_vop_chown(tp, ip,
+                                               &ip->i_pdquot, pdqp);
+               }
+               ASSERT(ip->i_d.di_version > 1);
+               xfs_set_projid(ip, fa->fsx_projid);
        }
 
        /*
@@ -1238,34 +1240,12 @@ xfs_ioctl_setattr(
         * extent size hint should be set on the inode. If no extent size flags
         * are set on the inode then unconditionally clear the extent size hint.
         */
-       if (mask & FSX_EXTSIZE) {
-               int     extsize = 0;
-
-               if (ip->i_d.di_flags &
-                               (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
-                       extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
-               ip->i_d.di_extsize = extsize;
-       }
-
-       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-       XFS_STATS_INC(xs_ig_attrchg);
+       if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
+               ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
+       else
+               ip->i_d.di_extsize = 0;
 
-       /*
-        * If this is a synchronous mount, make sure that the
-        * transaction goes to disk before returning to the user.
-        * This is slightly sub-optimal in that truncates require
-        * two sync transactions instead of one for wsync filesystems.
-        * One for the truncate and one for the timestamps since we
-        * don't want to change the timestamps unless we're sure the
-        * truncate worked.  Truncates are less than 1% of the laddis
-        * mix so this probably isn't worth the trouble to optimize.
-        */
-       if (mp->m_flags & XFS_MOUNT_WSYNC)
-               xfs_trans_set_sync(tp);
        code = xfs_trans_commit(tp, 0);
-       xfs_iunlock(ip, lock_flags);
 
        /*
         * Release any dquot(s) the inode had kept before chown.
@@ -1276,12 +1256,11 @@ xfs_ioctl_setattr(
 
        return code;
 
- error_return:
+error_trans_cancel:
+       xfs_trans_cancel(tp, 0);
+error_free_dquots:
        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(pdqp);
-       xfs_trans_cancel(tp, 0);
-       if (lock_flags)
-               xfs_iunlock(ip, lock_flags);
        return code;
 }
 
@@ -1292,20 +1271,15 @@ xfs_ioc_fssetxattr(
        void                    __user *arg)
 {
        struct fsxattr          fa;
-       unsigned int            mask;
        int error;
 
        if (copy_from_user(&fa, arg, sizeof(fa)))
                return -EFAULT;
 
-       mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID;
-       if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-               mask |= FSX_NONBLOCK;
-
        error = mnt_want_write_file(filp);
        if (error)
                return error;
-       error = xfs_ioctl_setattr(ip, &fa, mask);
+       error = xfs_ioctl_setattr(ip, &fa);
        mnt_drop_write_file(filp);
        return error;
 }
@@ -1325,14 +1299,14 @@ xfs_ioc_getxflags(
 
 STATIC int
 xfs_ioc_setxflags(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        struct file             *filp,
        void                    __user *arg)
 {
+       struct xfs_trans        *tp;
        struct fsxattr          fa;
        unsigned int            flags;
-       unsigned int            mask;
-       int error;
+       int                     error;
 
        if (copy_from_user(&flags, arg, sizeof(flags)))
                return -EFAULT;
@@ -1342,15 +1316,26 @@ xfs_ioc_setxflags(
                      FS_SYNC_FL))
                return -EOPNOTSUPP;
 
-       mask = FSX_XFLAGS;
-       if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-               mask |= FSX_NONBLOCK;
        fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
 
        error = mnt_want_write_file(filp);
        if (error)
                return error;
-       error = xfs_ioctl_setattr(ip, &fa, mask);
+
+       tp = xfs_ioctl_setattr_get_trans(ip);
+       if (IS_ERR(tp)) {
+               error = PTR_ERR(tp);
+               goto out_drop_write;
+       }
+
+       error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_drop_write;
+       }
+
+       error = xfs_trans_commit(tp, 0);
+out_drop_write:
        mnt_drop_write_file(filp);
        return error;
 }
index ec67728..bfc7c7c 100644 (file)
@@ -423,7 +423,7 @@ xfs_compat_attrmulti_by_handle(
 
        ops = memdup_user(compat_ptr(am_hreq.ops), size);
        if (IS_ERR(ops)) {
-               error = -PTR_ERR(ops);
+               error = PTR_ERR(ops);
                goto out_dput;
        }
 
index c980e2a..ccb1dd0 100644 (file)
@@ -802,7 +802,7 @@ int
 xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
-       size_t          count)
+       xfs_off_t       count)
 {
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
index 411fbb8..8688e66 100644 (file)
@@ -27,6 +27,6 @@ int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *);
 int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
                        struct xfs_bmbt_irec *);
-int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
 
 #endif /* __XFS_IOMAP_H__*/
index c50311c..ce80eeb 100644 (file)
@@ -380,18 +380,27 @@ xfs_vn_rename(
        struct inode    *odir,
        struct dentry   *odentry,
        struct inode    *ndir,
-       struct dentry   *ndentry)
+       struct dentry   *ndentry,
+       unsigned int    flags)
 {
        struct inode    *new_inode = ndentry->d_inode;
+       int             omode = 0;
        struct xfs_name oname;
        struct xfs_name nname;
 
-       xfs_dentry_to_name(&oname, odentry, 0);
+       if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+               return -EINVAL;
+
+       /* if we are exchanging files, we need to set i_mode of both files */
+       if (flags & RENAME_EXCHANGE)
+               omode = ndentry->d_inode->i_mode;
+
+       xfs_dentry_to_name(&oname, odentry, omode);
        xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode);
 
        return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
-                         XFS_I(ndir), &nname, new_inode ?
-                                               XFS_I(new_inode) : NULL);
+                         XFS_I(ndir), &nname,
+                         new_inode ? XFS_I(new_inode) : NULL, flags);
 }
 
 /*
@@ -1144,7 +1153,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
         */
        .rmdir                  = xfs_vn_unlink,
        .mknod                  = xfs_vn_mknod,
-       .rename                 = xfs_vn_rename,
+       .rename2                = xfs_vn_rename,
        .get_acl                = xfs_get_acl,
        .set_acl                = xfs_set_acl,
        .getattr                = xfs_vn_getattr,
@@ -1172,7 +1181,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
         */
        .rmdir                  = xfs_vn_unlink,
        .mknod                  = xfs_vn_mknod,
-       .rename                 = xfs_vn_rename,
+       .rename2                = xfs_vn_rename,
        .get_acl                = xfs_get_acl,
        .set_acl                = xfs_set_acl,
        .getattr                = xfs_vn_getattr,
index e408bf5..bcc7cfa 100644 (file)
@@ -33,6 +33,7 @@
 #include "xfs_fsops.h"
 #include "xfs_cksum.h"
 #include "xfs_sysfs.h"
+#include "xfs_sb.h"
 
 kmem_zone_t    *xfs_log_ticket_zone;
 
@@ -1290,9 +1291,20 @@ xfs_log_worker(
        struct xfs_mount        *mp = log->l_mp;
 
        /* dgc: errors ignored - not fatal and nowhere to report them */
-       if (xfs_log_need_covered(mp))
-               xfs_fs_log_dummy(mp);
-       else
+       if (xfs_log_need_covered(mp)) {
+               /*
+                * Dump a transaction into the log that contains no real change.
+                * This is needed to stamp the current tail LSN into the log
+                * during the covering operation.
+                *
+                * We cannot use an inode here for this - that will push dirty
+                * state back up into the VFS and then periodic inode flushing
+                * will prevent log covering from making progress. Hence we
+                * synchronously log the superblock instead to ensure the
+                * superblock is immediately unpinned and can be written back.
+                */
+               xfs_sync_sb(mp, true);
+       } else
                xfs_log_force(mp, 0);
 
        /* start pushing all the metadata that is currently dirty */
@@ -1395,6 +1407,8 @@ xlog_alloc_log(
        ASSERT(xfs_buf_islocked(bp));
        xfs_buf_unlock(bp);
 
+       /* use high priority wq for log I/O completion */
+       bp->b_ioend_wq = mp->m_log_workqueue;
        bp->b_iodone = xlog_iodone;
        log->l_xbuf = bp;
 
@@ -1427,6 +1441,8 @@ xlog_alloc_log(
                ASSERT(xfs_buf_islocked(bp));
                xfs_buf_unlock(bp);
 
+               /* use high priority wq for log I/O completion */
+               bp->b_ioend_wq = mp->m_log_workqueue;
                bp->b_iodone = xlog_iodone;
                iclog->ic_bp = bp;
                iclog->ic_data = bp->b_addr;
@@ -1806,8 +1822,6 @@ xlog_sync(
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_ASYNC(bp);
        bp->b_flags |= XBF_SYNCIO;
-       /* use high priority completion wq */
-       bp->b_ioend_wq = log->l_mp->m_log_workqueue;
 
        if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
                bp->b_flags |= XBF_FUA;
@@ -1856,8 +1870,6 @@ xlog_sync(
                bp->b_flags |= XBF_SYNCIO;
                if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
                        bp->b_flags |= XBF_FUA;
-               /* use high priority completion wq */
-               bp->b_ioend_wq = log->l_mp->m_log_workqueue;
 
                ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
                ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -2027,7 +2039,7 @@ xlog_print_tic_res(
                "  total reg   = %u bytes (o/flow = %u bytes)\n"
                "  ophdrs      = %u (ophdr space = %u bytes)\n"
                "  ophdr + reg = %u bytes\n"
-               "  num regions = %u\n",
+               "  num regions = %u",
                ((ticket->t_trans_type <= 0 ||
                  ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
                  "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
index d3d3883..4fa80e6 100644 (file)
@@ -408,11 +408,11 @@ xfs_update_alignment(xfs_mount_t *mp)
                if (xfs_sb_version_hasdalign(sbp)) {
                        if (sbp->sb_unit != mp->m_dalign) {
                                sbp->sb_unit = mp->m_dalign;
-                               mp->m_update_flags |= XFS_SB_UNIT;
+                               mp->m_update_sb = true;
                        }
                        if (sbp->sb_width != mp->m_swidth) {
                                sbp->sb_width = mp->m_swidth;
-                               mp->m_update_flags |= XFS_SB_WIDTH;
+                               mp->m_update_sb = true;
                        }
                } else {
                        xfs_warn(mp,
@@ -583,38 +583,19 @@ int
 xfs_mount_reset_sbqflags(
        struct xfs_mount        *mp)
 {
-       int                     error;
-       struct xfs_trans        *tp;
-
        mp->m_qflags = 0;
 
-       /*
-        * It is OK to look at sb_qflags here in mount path,
-        * without m_sb_lock.
-        */
+       /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
        if (mp->m_sb.sb_qflags == 0)
                return 0;
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = 0;
        spin_unlock(&mp->m_sb_lock);
 
-       /*
-        * If the fs is readonly, let the incore superblock run
-        * with quotas off but don't flush the update out to disk
-        */
-       if (mp->m_flags & XFS_MOUNT_RDONLY)
+       if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;
 
-       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               xfs_alert(mp, "%s: Superblock update failed!", __func__);
-               return error;
-       }
-
-       xfs_mod_sb(tp, XFS_SB_QFLAGS);
-       return xfs_trans_commit(tp, 0);
+       return xfs_sync_sb(mp, false);
 }
 
 __uint64_t
@@ -659,26 +640,25 @@ xfs_mountfs(
        xfs_sb_mount_common(mp, sbp);
 
        /*
-        * Check for a mismatched features2 values.  Older kernels
-        * read & wrote into the wrong sb offset for sb_features2
-        * on some platforms due to xfs_sb_t not being 64bit size aligned
-        * when sb_features2 was added, which made older superblock
-        * reading/writing routines swap it as a 64-bit value.
+        * Check for a mismatched features2 values.  Older kernels read & wrote
+        * into the wrong sb offset for sb_features2 on some platforms due to
+        * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
+        * which made older superblock reading/writing routines swap it as a
+        * 64-bit value.
         *
         * For backwards compatibility, we make both slots equal.
         *
-        * If we detect a mismatched field, we OR the set bits into the
-        * existing features2 field in case it has already been modified; we
-        * don't want to lose any features.  We then update the bad location
-        * with the ORed value so that older kernels will see any features2
-        * flags, and mark the two fields as needing updates once the
-        * transaction subsystem is online.
+        * If we detect a mismatched field, we OR the set bits into the existing
+        * features2 field in case it has already been modified; we don't want
+        * to lose any features.  We then update the bad location with the ORed
+        * value so that older kernels will see any features2 flags. The
+        * superblock writeback code ensures the new sb_features2 is copied to
+        * sb_bad_features2 before it is logged or written to disk.
         */
        if (xfs_sb_has_mismatched_features2(sbp)) {
                xfs_warn(mp, "correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
-               sbp->sb_bad_features2 = sbp->sb_features2;
-               mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
+               mp->m_update_sb = true;
 
                /*
                 * Re-check for ATTR2 in case it was found in bad_features2
@@ -692,17 +672,17 @@ xfs_mountfs(
        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
           (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_sb_version_removeattr2(&mp->m_sb);
-               mp->m_update_flags |= XFS_SB_FEATURES2;
+               mp->m_update_sb = true;
 
                /* update sb_versionnum for the clearing of the morebits */
                if (!sbp->sb_features2)
-                       mp->m_update_flags |= XFS_SB_VERSIONNUM;
+                       mp->m_update_sb = true;
        }
 
        /* always use v2 inodes by default now */
        if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
                mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
-               mp->m_update_flags |= XFS_SB_VERSIONNUM;
+               mp->m_update_sb = true;
        }
 
        /*
@@ -895,8 +875,8 @@ xfs_mountfs(
         * the next remount into writeable mode.  Otherwise we would never
         * perform the update e.g. for the root filesystem.
         */
-       if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
-               error = xfs_mount_log_sb(mp, mp->m_update_flags);
+       if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+               error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        goto out_rtunmount;
@@ -1103,9 +1083,6 @@ xfs_fs_writable(
 int
 xfs_log_sbcount(xfs_mount_t *mp)
 {
-       xfs_trans_t     *tp;
-       int             error;
-
        /* allow this to proceed during the freeze sequence... */
        if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
                return 0;
@@ -1119,17 +1096,7 @@ xfs_log_sbcount(xfs_mount_t *mp)
        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
                return 0;
 
-       tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
-       xfs_trans_set_sync(tp);
-       error = xfs_trans_commit(tp, 0);
-       return error;
+       return xfs_sync_sb(mp, true);
 }
 
 /*
@@ -1422,34 +1389,6 @@ xfs_freesb(
        xfs_buf_relse(bp);
 }
 
-/*
- * Used to log changes to the superblock unit and width fields which could
- * be altered by the mount options, as well as any potential sb_features2
- * fixup. Only the first superblock is updated.
- */
-int
-xfs_mount_log_sb(
-       xfs_mount_t     *mp,
-       __int64_t       fields)
-{
-       xfs_trans_t     *tp;
-       int             error;
-
-       ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
-                        XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
-                        XFS_SB_VERSIONNUM));
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-       xfs_mod_sb(tp, fields);
-       error = xfs_trans_commit(tp, 0);
-       return error;
-}
-
 /*
  * If the underlying (data/log/rt) device is readonly, there are some
  * operations that cannot proceed.
index 22ccf69..a5b2ff8 100644 (file)
@@ -162,8 +162,7 @@ typedef struct xfs_mount {
        struct delayed_work     m_reclaim_work; /* background inode reclaim */
        struct delayed_work     m_eofblocks_work; /* background eof blocks
                                                     trimming */
-       __int64_t               m_update_flags; /* sb flags we need to update
-                                                  on the next remount,rw */
+       bool                    m_update_sb;    /* sb needs update in mount */
        int64_t                 m_low_space[XFS_LOWSP_MAX];
                                                /* low free space thresholds */
        struct xfs_kobj         m_kobj;
@@ -378,7 +377,7 @@ extern void xfs_unmountfs(xfs_mount_t *);
 extern int     xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
 extern int     xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
                        uint, int);
-extern int     xfs_mount_log_sb(xfs_mount_t *, __int64_t);
+extern int     xfs_mount_log_sb(xfs_mount_t *);
 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
 extern int     xfs_readsb(xfs_mount_t *, int);
 extern void    xfs_freesb(xfs_mount_t *);
index 79fb19d..3e81862 100644 (file)
@@ -714,7 +714,6 @@ STATIC int
 xfs_qm_qino_alloc(
        xfs_mount_t     *mp,
        xfs_inode_t     **ip,
-       __int64_t       sbfields,
        uint            flags)
 {
        xfs_trans_t     *tp;
@@ -777,11 +776,6 @@ xfs_qm_qino_alloc(
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
-               ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                       XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
-                               (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                                XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
-                                XFS_SB_QFLAGS));
 
                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
@@ -798,7 +792,7 @@ xfs_qm_qino_alloc(
        else
                mp->m_sb.sb_pquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
-       xfs_mod_sb(tp, sbfields);
+       xfs_log_sb(tp);
 
        if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
@@ -1451,7 +1445,7 @@ xfs_qm_mount_quotas(
        spin_unlock(&mp->m_sb_lock);
 
        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
-               if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
+               if (xfs_sync_sb(mp, false)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
@@ -1482,7 +1476,6 @@ xfs_qm_init_quotainos(
        struct xfs_inode        *gip = NULL;
        struct xfs_inode        *pip = NULL;
        int                     error;
-       __int64_t               sbflags = 0;
        uint                    flags = 0;
 
        ASSERT(mp->m_quotainfo);
@@ -1517,9 +1510,6 @@ xfs_qm_init_quotainos(
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
-               sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
-                           XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
-                           XFS_SB_QFLAGS);
        }
 
        /*
@@ -1530,7 +1520,6 @@ xfs_qm_init_quotainos(
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                error = xfs_qm_qino_alloc(mp, &uip,
-                                             sbflags | XFS_SB_UQUOTINO,
                                              flags | XFS_QMOPT_UQUOTA);
                if (error)
                        goto error_rele;
@@ -1539,7 +1528,6 @@ xfs_qm_init_quotainos(
        }
        if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
                error = xfs_qm_qino_alloc(mp, &gip,
-                                         sbflags | XFS_SB_GQUOTINO,
                                          flags | XFS_QMOPT_GQUOTA);
                if (error)
                        goto error_rele;
@@ -1548,7 +1536,6 @@ xfs_qm_init_quotainos(
        }
        if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
                error = xfs_qm_qino_alloc(mp, &pip,
-                                         sbflags | XFS_SB_PQUOTINO,
                                          flags | XFS_QMOPT_PQUOTA);
                if (error)
                        goto error_rele;
@@ -1587,32 +1574,6 @@ xfs_qm_dqfree_one(
        xfs_qm_dqdestroy(dqp);
 }
 
-/*
- * Start a transaction and write the incore superblock changes to
- * disk. flags parameter indicates which fields have changed.
- */
-int
-xfs_qm_write_sb_changes(
-       xfs_mount_t     *mp,
-       __int64_t       flags)
-{
-       xfs_trans_t     *tp;
-       int             error;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_mod_sb(tp, flags);
-       error = xfs_trans_commit(tp, 0);
-
-       return error;
-}
-
-
 /* --------------- utility functions for vnodeops ---------------- */
 
 
index 41f6c0b..0d4d359 100644 (file)
@@ -157,7 +157,6 @@ struct xfs_dquot_acct {
 #define XFS_QM_RTBWARNLIMIT    5
 
 extern void            xfs_qm_destroy_quotainfo(struct xfs_mount *);
-extern int             xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t);
 
 /* dquot stuff */
 extern void            xfs_qm_dqpurge_all(struct xfs_mount *, uint);
index cb6168e..9b965db 100644 (file)
@@ -91,8 +91,7 @@ xfs_qm_scall_quotaoff(
                mutex_unlock(&q->qi_quotaofflock);
 
                /* XXX what to do if error ? Revert back to old vals incore ? */
-               error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
-               return error;
+               return xfs_sync_sb(mp, false);
        }
 
        dqtype = 0;
@@ -313,7 +312,6 @@ xfs_qm_scall_quotaon(
 {
        int             error;
        uint            qf;
-       __int64_t       sbflags;
 
        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
@@ -321,30 +319,22 @@ xfs_qm_scall_quotaon(
         */
        flags &= ~(XFS_ALL_QUOTA_ACCT);
 
-       sbflags = 0;
-
        if (flags == 0) {
                xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                        __func__, mp->m_qflags);
                return -EINVAL;
        }
 
-       /* No fs can turn on quotas with a delayed effect */
-       ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
-
        /*
         * Can't enforce without accounting. We check the superblock
         * qflags here instead of m_qflags because rootfs can have
         * quota acct on ondisk without m_qflags' knowing.
         */
-       if (((flags & XFS_UQUOTA_ACCT) == 0 &&
-            (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
+       if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
             (flags & XFS_UQUOTA_ENFD)) ||
-           ((flags & XFS_GQUOTA_ACCT) == 0 &&
-            (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
+           ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
             (flags & XFS_GQUOTA_ENFD)) ||
-           ((flags & XFS_PQUOTA_ACCT) == 0 &&
-            (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
+           ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
                xfs_debug(mp,
                        "%s: Can't enforce without acct, flags=%x sbflags=%x",
@@ -369,11 +359,11 @@ xfs_qm_scall_quotaon(
        /*
         * There's nothing to change if it's the same.
         */
-       if ((qf & flags) == flags && sbflags == 0)
+       if ((qf & flags) == flags)
                return -EEXIST;
-       sbflags |= XFS_SB_QFLAGS;
 
-       if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
+       error = xfs_sync_sb(mp, false);
+       if (error)
                return error;
        /*
         * If we aren't trying to switch on quota enforcement, we are done.
@@ -383,8 +373,7 @@ xfs_qm_scall_quotaon(
             ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
             ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
-            (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
-           (flags & XFS_ALL_QUOTA_ENFD) == 0)
+            (mp->m_qflags & XFS_GQUOTA_ACCT)))
                return 0;
 
        if (! XFS_IS_QUOTA_RUNNING(mp))
@@ -421,20 +410,12 @@ xfs_qm_scall_getqstat(
        memset(out, 0, sizeof(fs_quota_stat_t));
 
        out->qs_version = FS_QSTAT_VERSION;
-       if (!xfs_sb_version_hasquota(&mp->m_sb)) {
-               out->qs_uquota.qfs_ino = NULLFSINO;
-               out->qs_gquota.qfs_ino = NULLFSINO;
-               return 0;
-       }
-
        out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                                                        (XFS_ALL_QUOTA_ACCT|
                                                         XFS_ALL_QUOTA_ENFD));
-       if (q) {
-               uip = q->qi_uquotaip;
-               gip = q->qi_gquotaip;
-               pip = q->qi_pquotaip;
-       }
+       uip = q->qi_uquotaip;
+       gip = q->qi_gquotaip;
+       pip = q->qi_pquotaip;
        if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                        0, 0, &uip) == 0)
@@ -480,14 +461,13 @@ xfs_qm_scall_getqstat(
                if (temppqip)
                        IRELE(pip);
        }
-       if (q) {
-               out->qs_incoredqs = q->qi_dquots;
-               out->qs_btimelimit = q->qi_btimelimit;
-               out->qs_itimelimit = q->qi_itimelimit;
-               out->qs_rtbtimelimit = q->qi_rtbtimelimit;
-               out->qs_bwarnlimit = q->qi_bwarnlimit;
-               out->qs_iwarnlimit = q->qi_iwarnlimit;
-       }
+       out->qs_incoredqs = q->qi_dquots;
+       out->qs_btimelimit = q->qi_btimelimit;
+       out->qs_itimelimit = q->qi_itimelimit;
+       out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+       out->qs_bwarnlimit = q->qi_bwarnlimit;
+       out->qs_iwarnlimit = q->qi_iwarnlimit;
+
        return 0;
 }
 
@@ -508,13 +488,6 @@ xfs_qm_scall_getqstatv(
        bool                    tempgqip = false;
        bool                    temppqip = false;
 
-       if (!xfs_sb_version_hasquota(&mp->m_sb)) {
-               out->qs_uquota.qfs_ino = NULLFSINO;
-               out->qs_gquota.qfs_ino = NULLFSINO;
-               out->qs_pquota.qfs_ino = NULLFSINO;
-               return 0;
-       }
-
        out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                                                        (XFS_ALL_QUOTA_ACCT|
                                                         XFS_ALL_QUOTA_ENFD));
@@ -522,11 +495,9 @@ xfs_qm_scall_getqstatv(
        out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
        out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;
 
-       if (q) {
-               uip = q->qi_uquotaip;
-               gip = q->qi_gquotaip;
-               pip = q->qi_pquotaip;
-       }
+       uip = q->qi_uquotaip;
+       gip = q->qi_gquotaip;
+       pip = q->qi_pquotaip;
        if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                        0, 0, &uip) == 0)
@@ -561,14 +532,13 @@ xfs_qm_scall_getqstatv(
                if (temppqip)
                        IRELE(pip);
        }
-       if (q) {
-               out->qs_incoredqs = q->qi_dquots;
-               out->qs_btimelimit = q->qi_btimelimit;
-               out->qs_itimelimit = q->qi_itimelimit;
-               out->qs_rtbtimelimit = q->qi_rtbtimelimit;
-               out->qs_bwarnlimit = q->qi_bwarnlimit;
-               out->qs_iwarnlimit = q->qi_iwarnlimit;
-       }
+       out->qs_incoredqs = q->qi_dquots;
+       out->qs_btimelimit = q->qi_btimelimit;
+       out->qs_itimelimit = q->qi_itimelimit;
+       out->qs_rtbtimelimit = q->qi_rtbtimelimit;
+       out->qs_bwarnlimit = q->qi_bwarnlimit;
+       out->qs_iwarnlimit = q->qi_iwarnlimit;
+
        return 0;
 }
 
@@ -800,7 +770,7 @@ xfs_qm_log_quotaoff(
        mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);
 
-       xfs_mod_sb(tp, XFS_SB_QFLAGS);
+       xfs_log_sb(tp);
 
        /*
         * We have to make sure that the transaction is secure on disk before we
index 801a84c..6923905 100644 (file)
@@ -64,19 +64,10 @@ xfs_fs_get_xstatev(
        return xfs_qm_scall_getqstatv(mp, fqs);
 }
 
-STATIC int
-xfs_fs_set_xstate(
-       struct super_block      *sb,
-       unsigned int            uflags,
-       int                     op)
+static unsigned int
+xfs_quota_flags(unsigned int uflags)
 {
-       struct xfs_mount        *mp = XFS_M(sb);
-       unsigned int            flags = 0;
-
-       if (sb->s_flags & MS_RDONLY)
-               return -EROFS;
-       if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
-               return -ENOSYS;
+       unsigned int flags = 0;
 
        if (uflags & FS_QUOTA_UDQ_ACCT)
                flags |= XFS_UQUOTA_ACCT;
@@ -91,16 +82,39 @@ xfs_fs_set_xstate(
        if (uflags & FS_QUOTA_PDQ_ENFD)
                flags |= XFS_PQUOTA_ENFD;
 
-       switch (op) {
-       case Q_XQUOTAON:
-               return xfs_qm_scall_quotaon(mp, flags);
-       case Q_XQUOTAOFF:
-               if (!XFS_IS_QUOTA_ON(mp))
-                       return -EINVAL;
-               return xfs_qm_scall_quotaoff(mp, flags);
-       }
+       return flags;
+}
+
+STATIC int
+xfs_quota_enable(
+       struct super_block      *sb,
+       unsigned int            uflags)
+{
+       struct xfs_mount        *mp = XFS_M(sb);
+
+       if (sb->s_flags & MS_RDONLY)
+               return -EROFS;
+       if (!XFS_IS_QUOTA_RUNNING(mp))
+               return -ENOSYS;
+
+       return xfs_qm_scall_quotaon(mp, xfs_quota_flags(uflags));
+}
+
+STATIC int
+xfs_quota_disable(
+       struct super_block      *sb,
+       unsigned int            uflags)
+{
+       struct xfs_mount        *mp = XFS_M(sb);
+
+       if (sb->s_flags & MS_RDONLY)
+               return -EROFS;
+       if (!XFS_IS_QUOTA_RUNNING(mp))
+               return -ENOSYS;
+       if (!XFS_IS_QUOTA_ON(mp))
+               return -EINVAL;
 
-       return -EINVAL;
+       return xfs_qm_scall_quotaoff(mp, xfs_quota_flags(uflags));
 }
 
 STATIC int
@@ -166,7 +180,8 @@ xfs_fs_set_dqblk(
 const struct quotactl_ops xfs_quotactl_operations = {
        .get_xstatev            = xfs_fs_get_xstatev,
        .get_xstate             = xfs_fs_get_xstate,
-       .set_xstate             = xfs_fs_set_xstate,
+       .quota_enable           = xfs_quota_enable,
+       .quota_disable          = xfs_quota_disable,
        .rm_xquota              = xfs_fs_rm_xquota,
        .get_dqblk              = xfs_fs_get_dqblk,
        .set_dqblk              = xfs_fs_set_dqblk,
index 19cbda1..f2449fd 100644 (file)
@@ -685,7 +685,7 @@ xfs_blkdev_get(
                                    mp);
        if (IS_ERR(*bdevp)) {
                error = PTR_ERR(*bdevp);
-               xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
+               xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
        }
 
        return error;
@@ -1111,6 +1111,11 @@ xfs_fs_statfs(
                                        statp->f_files,
                                        mp->m_maxicount);
 
+       /* If sb_icount overshot maxicount, report actual allocation */
+       statp->f_files = max_t(typeof(statp->f_files),
+                                       statp->f_files,
+                                       sbp->sb_icount);
+
        /* make sure statp->f_ffree does not underflow */
        ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
        statp->f_ffree = max_t(__int64_t, ffree, 0);
@@ -1257,13 +1262,13 @@ xfs_fs_remount(
                 * If this is the first remount to writeable state we
                 * might have some superblock changes to update.
                 */
-               if (mp->m_update_flags) {
-                       error = xfs_mount_log_sb(mp, mp->m_update_flags);
+               if (mp->m_update_sb) {
+                       error = xfs_sync_sb(mp, false);
                        if (error) {
                                xfs_warn(mp, "failed to write sb changes");
                                return error;
                        }
-                       mp->m_update_flags = 0;
+                       mp->m_update_sb = false;
                }
 
                /*
@@ -1293,8 +1298,9 @@ xfs_fs_remount(
 
 /*
  * Second stage of a freeze. The data is already frozen so we only
- * need to take care of the metadata. Once that's done write a dummy
- * record to dirty the log in case of a crash while frozen.
+ * need to take care of the metadata. Once that's done sync the superblock
+ * to the log to dirty it in case of a crash while frozen. This ensures that we
+ * will recover the unlinked inode lists on the next mount.
  */
 STATIC int
 xfs_fs_freeze(
@@ -1304,7 +1310,7 @@ xfs_fs_freeze(
 
        xfs_save_resvblks(mp);
        xfs_quiesce_attr(mp);
-       return xfs_fs_log_dummy(mp);
+       return xfs_sync_sb(mp, true);
 }
 
 STATIC int
index 1743b9f..a0c8067 100644 (file)
@@ -148,24 +148,6 @@ static struct ctl_table xfs_table[] = {
                .extra1         = &xfs_params.inherit_noatim.min,
                .extra2         = &xfs_params.inherit_noatim.max
        },
-       {
-               .procname       = "xfsbufd_centisecs",
-               .data           = &xfs_params.xfs_buf_timer.val,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &xfs_params.xfs_buf_timer.min,
-               .extra2         = &xfs_params.xfs_buf_timer.max
-       },
-       {
-               .procname       = "age_buffer_centisecs",
-               .data           = &xfs_params.xfs_buf_age.val,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &xfs_params.xfs_buf_age.min,
-               .extra2         = &xfs_params.xfs_buf_age.max
-       },
        {
                .procname       = "inherit_nosymlinks",
                .data           = &xfs_params.inherit_nosym.val,
index fa3135b..eb90cd5 100644 (file)
@@ -472,6 +472,7 @@ xfs_trans_apply_sb_deltas(
                whole = 1;
        }
 
+       xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        if (whole)
                /*
                 * Log the whole thing, the fields are noncontiguous.
index 0a4d4ab..7579841 100644 (file)
@@ -327,9 +327,10 @@ xfs_trans_read_buf_map(
                return -EIO;
        }
 
-       if (tp)
+       if (tp) {
                _xfs_trans_bjoin(tp, bp, 1);
-       trace_xfs_trans_read_buf(bp->b_fspriv);
+               trace_xfs_trans_read_buf(bp->b_fspriv);
+       }
        *bpp = bp;
        return 0;
 
index d5ec6c8..6b040f4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5a0a3e5..03aacfb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8b06e4c..11c3a01 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7461327..273de70 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1baae6e..9318a87 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a08e55a..b0bb30e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 03b3e6d..0bc78df 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5ba7846..d56f5d7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20141107
+#define ACPI_CA_VERSION                 0x20150204
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -568,6 +568,14 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                                          acpi_gpe_handler
                                                          address,
                                                          void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_install_gpe_raw_handler(acpi_handle
+                                                             gpe_device,
+                                                             u32 gpe_number,
+                                                             u32 type,
+                                                             acpi_gpe_handler
+                                                             address,
+                                                             void *context))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                 acpi_remove_gpe_handler(acpi_handle gpe_device,
                                                         u32 gpe_number,
@@ -890,12 +898,6 @@ ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
  */
 ACPI_GLOBAL(u8, acpi_gbl_permanent_mmap);
 
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-                           acpi_get_id(acpi_handle object,
-                                       acpi_owner_id * out_type))
-
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_unload_table_id(acpi_owner_id id))
-
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
                            acpi_get_table_with_size(acpi_string signature,
                                                     u32 instance,
index eb760ca..ebe2426 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -305,43 +305,51 @@ struct acpi_resource_source {
        u8                                      max_address_fixed; \
        union acpi_resource_attribute           info;
 
-struct acpi_resource_address {
-ACPI_RESOURCE_ADDRESS_COMMON};
-
-struct acpi_resource_address16 {
-       ACPI_RESOURCE_ADDRESS_COMMON u16 granularity;
+struct acpi_address16_attribute {
+       u16 granularity;
        u16 minimum;
        u16 maximum;
        u16 translation_offset;
        u16 address_length;
-       struct acpi_resource_source resource_source;
 };
 
-struct acpi_resource_address32 {
-       ACPI_RESOURCE_ADDRESS_COMMON u32 granularity;
+struct acpi_address32_attribute {
+       u32 granularity;
        u32 minimum;
        u32 maximum;
        u32 translation_offset;
        u32 address_length;
-       struct acpi_resource_source resource_source;
 };
 
-struct acpi_resource_address64 {
-       ACPI_RESOURCE_ADDRESS_COMMON u64 granularity;
+struct acpi_address64_attribute {
+       u64 granularity;
        u64 minimum;
        u64 maximum;
        u64 translation_offset;
        u64 address_length;
+};
+
+struct acpi_resource_address {
+ACPI_RESOURCE_ADDRESS_COMMON};
+
+struct acpi_resource_address16 {
+       ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address16_attribute address;
+       struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address32 {
+       ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address32_attribute address;
+       struct acpi_resource_source resource_source;
+};
+
+struct acpi_resource_address64 {
+       ACPI_RESOURCE_ADDRESS_COMMON struct acpi_address64_attribute address;
        struct acpi_resource_source resource_source;
 };
 
 struct acpi_resource_extended_address64 {
        ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
-       u64 granularity;
-       u64 minimum;
-       u64 maximum;
-       u64 translation_offset;
-       u64 address_length;
+       struct acpi_address64_attribute address;
        u64 type_specific;
 };
 
index bee19d8..d4081fe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 29e7937..b80b0e6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ecff624..f06d75e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5480cb2..440ca81 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bbef173..b034f10 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ typedef u32 acpi_event_status;
 /*
  * GPE info flags - Per GPE
  * +-------+-+-+---+
- * |  7:4  |3|2|1:0|
+ * |  7:5  |4|3|2:0|
  * +-------+-+-+---+
  *     |    | |  |
  *     |    | |  +-- Type of dispatch:to method, handler, notify, or none
@@ -756,13 +756,15 @@ typedef u32 acpi_event_status;
 #define ACPI_GPE_DISPATCH_METHOD        (u8) 0x01
 #define ACPI_GPE_DISPATCH_HANDLER       (u8) 0x02
 #define ACPI_GPE_DISPATCH_NOTIFY        (u8) 0x03
-#define ACPI_GPE_DISPATCH_MASK          (u8) 0x03
+#define ACPI_GPE_DISPATCH_RAW_HANDLER   (u8) 0x04
+#define ACPI_GPE_DISPATCH_MASK          (u8) 0x07
+#define ACPI_GPE_DISPATCH_TYPE(flags)   ((u8) ((flags) & ACPI_GPE_DISPATCH_MASK))
 
-#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x04
+#define ACPI_GPE_LEVEL_TRIGGERED        (u8) 0x08
 #define ACPI_GPE_EDGE_TRIGGERED         (u8) 0x00
-#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x04
+#define ACPI_GPE_XRUPT_TYPE_MASK        (u8) 0x08
 
-#define ACPI_GPE_CAN_WAKE               (u8) 0x08
+#define ACPI_GPE_CAN_WAKE               (u8) 0x10
 
 /*
  * Flags for GPE and Lock interfaces
index 5f8cc1f..ad74dc5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2b61238..71e5ec5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 384875d..f54de0a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1ba7c19..74ba46c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 568d4b8..acedc3f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d459cd1..24c7aa8 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <linux/errno.h>
 #include <linux/ioport.h>      /* for struct resource */
+#include <linux/resource_ext.h>
 #include <linux/device.h>
 #include <linux/property.h>
 
@@ -151,6 +152,10 @@ int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
 int acpi_unmap_cpu(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
+#endif
+
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
 int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
@@ -288,22 +293,25 @@ extern int pnpacpi_disabled;
 bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
 bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
 bool acpi_dev_resource_address_space(struct acpi_resource *ares,
-                                    struct resource *res);
+                                    struct resource_win *win);
 bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
-                                        struct resource *res);
+                                        struct resource_win *win);
 unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
 bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                                 struct resource *res);
 
-struct resource_list_entry {
-       struct list_head node;
-       struct resource res;
-};
-
 void acpi_dev_free_resource_list(struct list_head *list);
 int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
                           int (*preproc)(struct acpi_resource *, void *),
                           void *preproc_data);
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+                                 unsigned long types);
+
+static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
+                                                  void *arg)
+{
+       return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
+}
 
 int acpi_check_resource_conflict(const struct resource *res);
 
index 4d078ce..2ee4888 100644 (file)
@@ -66,8 +66,6 @@ struct cpufreq_policy {
        unsigned int            shared_type; /* ACPI: ANY or ALL affected CPUs
                                                should set cpufreq */
        unsigned int            cpu;    /* cpu nr of CPU managing this policy */
-       unsigned int            last_cpu; /* cpu nr of previous CPU that managed
-                                          * this policy */
        struct clk              *clk;
        struct cpufreq_cpuinfo  cpuinfo;/* see above */
 
@@ -113,6 +111,9 @@ struct cpufreq_policy {
        wait_queue_head_t       transition_wait;
        struct task_struct      *transition_task; /* Task which is doing the transition */
 
+       /* cpufreq-stats */
+       struct cpufreq_stats    *stats;
+
        /* For cpufreq driver's internal use */
        void                    *driver_data;
 };
@@ -367,9 +368,8 @@ static inline void cpufreq_resume(void) {}
 #define CPUFREQ_INCOMPATIBLE           (1)
 #define CPUFREQ_NOTIFY                 (2)
 #define CPUFREQ_START                  (3)
-#define CPUFREQ_UPDATE_POLICY_CPU      (4)
-#define CPUFREQ_CREATE_POLICY          (5)
-#define CPUFREQ_REMOVE_POLICY          (6)
+#define CPUFREQ_CREATE_POLICY          (4)
+#define CPUFREQ_REMOVE_POLICY          (5)
 
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644 (file)
index 0000000..602fbbf
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_EVENT_H__
+#define __LINUX_DEVFREQ_EVENT_H__
+
+#include <linux/device.h>
+
+/**
+ * struct devfreq_event_dev - the devfreq-event device
+ *
+ * @node       : Contains the devfreq-event devices that have been registered.
+ * @dev                : the device registered by devfreq-event class. dev.parent is
+ *               the device using devfreq-event.
+ * @lock       : a mutex to protect accessing devfreq-event.
+ * @enable_count: the number of times the enable function has been called.
+ * @desc       : the description for devfreq-event device.
+ *
+ * This structure contains devfreq-event device information.
+ */
+struct devfreq_event_dev {
+       struct list_head node;
+
+       struct device dev;
+       struct mutex lock;
+       u32 enable_count;
+
+       const struct devfreq_event_desc *desc;
+};
+
+/**
+ * struct devfreq_event_data - the devfreq-event data
+ *
+ * @load_count : load count of devfreq-event device for the given period.
+ * @total_count        : total count of devfreq-event device for the given period.
+ *               each count may represent a clock cycle, a time unit
+ *               (ns/us/...), or anything the device driver wants.
+ *               Generally, utilization is load_count / total_count.
+ *
+ * This structure contains the data of devfreq-event device for polling period.
+ */
+struct devfreq_event_data {
+       unsigned long load_count;
+       unsigned long total_count;
+};
+
+/**
+ * struct devfreq_event_ops - the operations of devfreq-event device
+ *
+ * @enable     : Enable the devfreq-event device.
+ * @disable    : Disable the devfreq-event device.
+ * @reset      : Reset all setting of the devfreq-event device.
+ * @set_event  : Set the specific event type for the devfreq-event device.
+ * @get_event  : Get the result of the devfreq-event device with specific
+ *               event type.
+ *
+ * This structure contains devfreq-event device operations which can be
+ * implemented by devfreq-event device drivers.
+ */
+struct devfreq_event_ops {
+       /* Optional functions */
+       int (*enable)(struct devfreq_event_dev *edev);
+       int (*disable)(struct devfreq_event_dev *edev);
+       int (*reset)(struct devfreq_event_dev *edev);
+
+       /* Mandatory functions */
+       int (*set_event)(struct devfreq_event_dev *edev);
+       int (*get_event)(struct devfreq_event_dev *edev,
+                        struct devfreq_event_data *edata);
+};
+
+/**
+ * struct devfreq_event_desc - the descriptor of devfreq-event device
+ *
+ * @name       : the name of devfreq-event device.
+ * @driver_data        : the private data for devfreq-event driver.
+ * @ops                : the operation to control devfreq-event device.
+ *
+ * Each devfreq-event device is described with this structure.
+ * This structure contains the various data for devfreq-event device.
+ */
+struct devfreq_event_desc {
+       const char *name;
+       void *driver_data;
+
+       struct devfreq_event_ops *ops;
+};
+
+#if defined(CONFIG_PM_DEVFREQ_EVENT)
+extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
+extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
+extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
+extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
+extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
+                               struct devfreq_event_data *edata);
+extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+                               struct device *dev, int index);
+extern int devfreq_event_get_edev_count(struct device *dev);
+extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+                               struct devfreq_event_desc *desc);
+extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+                               struct devfreq_event_desc *desc);
+extern void devm_devfreq_event_remove_edev(struct device *dev,
+                               struct devfreq_event_dev *edev);
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+       return edev->desc->driver_data;
+}
+#else
+static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+       return -EINVAL;
+}
+
+static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+       return -EINVAL;
+}
+
+static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+       return false;
+}
+
+static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+       return -EINVAL;
+}
+
+static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
+                                       struct devfreq_event_data *edata)
+{
+       return -EINVAL;
+}
+
+static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+       return -EINVAL;
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+                                       struct device *dev, int index)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_get_edev_count(struct device *dev)
+{
+       return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+                                       struct devfreq_event_desc *desc)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+       return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
+                                       struct device *dev,
+                                       struct devfreq_event_desc *desc)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void devm_devfreq_event_remove_edev(struct device *dev,
+                                       struct devfreq_event_dev *edev)
+{
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+       return NULL;
+}
+#endif /* CONFIG_PM_DEVFREQ_EVENT */
+
+#endif /* __LINUX_DEVFREQ_EVENT_H__ */
index 3713a72..c0d4d1e 100644 (file)
@@ -5,9 +5,6 @@
 #ifndef _LINUX_DQBLK_V1_H
 #define _LINUX_DQBLK_V1_H
 
-/* Root squash turned on */
-#define V1_DQF_RSQUASH 1
-
 /* Numbers of blocks needed for updates */
 #define V1_INIT_ALLOC 1
 #define V1_INIT_REWRITE 1
index 60acab2..f125b88 100644 (file)
@@ -623,7 +623,7 @@ struct inode {
        atomic_t                i_readcount; /* struct files open RO */
 #endif
        const struct file_operations    *i_fop; /* former ->i_op->default_file_ops */
-       struct file_lock        *i_flock;
+       struct file_lock_context        *i_flctx;
        struct address_space    i_data;
        struct list_head        i_devices;
        union {
@@ -883,6 +883,8 @@ static inline struct file *get_file(struct file *f)
 /* legacy typedef, should eventually be removed */
 typedef void *fl_owner_t;
 
+struct file_lock;
+
 struct file_lock_operations {
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);
@@ -896,7 +898,7 @@ struct lock_manager_operations {
        void (*lm_notify)(struct file_lock *);  /* unblock callback */
        int (*lm_grant)(struct file_lock *, int);
        bool (*lm_break)(struct file_lock *);
-       int (*lm_change)(struct file_lock **, int, struct list_head *);
+       int (*lm_change)(struct file_lock *, int, struct list_head *);
        void (*lm_setup)(struct file_lock *, void **);
 };
 
@@ -921,17 +923,17 @@ int locks_in_grace(struct net *);
  * FIXME: should we create a separate "struct lock_request" to help distinguish
  * these two uses?
  *
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
  *
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
  *
  * Obviously, the last two criteria only matter for POSIX locks.
  */
 struct file_lock {
        struct file_lock *fl_next;      /* singly linked list for this inode  */
+       struct list_head fl_list;       /* link into file_lock_context */
        struct hlist_node fl_link;      /* node in global lists */
        struct list_head fl_block;      /* circular list of blocked processes */
        fl_owner_t fl_owner;
@@ -962,6 +964,16 @@ struct file_lock {
        } fl_u;
 };
 
+struct file_lock_context {
+       spinlock_t              flc_lock;
+       struct list_head        flc_flock;
+       struct list_head        flc_posix;
+       struct list_head        flc_lease;
+       int                     flc_flock_cnt;
+       int                     flc_posix_cnt;
+       int                     flc_lease_cnt;
+};
+
 /* The following constant reflects the upper bound of the file/locking space */
 #ifndef OFFSET_MAX
 #define INT_LIMIT(x)   (~((x)1 << (sizeof(x)*8 - 1)))
@@ -988,6 +1000,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
 extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
 void locks_free_lock(struct file_lock *fl);
 extern void locks_init_lock(struct file_lock *);
 extern struct file_lock * locks_alloc_lock(void);
@@ -1008,7 +1021,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
 extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
 #else /* !CONFIG_FILE_LOCKING */
 static inline int fcntl_getlk(struct file *file, unsigned int cmd,
                              struct flock __user *user)
@@ -1045,6 +1058,11 @@ static inline int fcntl_getlease(struct file *filp)
        return F_UNLCK;
 }
 
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
 static inline void locks_init_lock(struct file_lock *fl)
 {
        return;
@@ -1135,7 +1153,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
        return -EINVAL;
 }
 
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
                               struct list_head *dispose)
 {
        return -EINVAL;
@@ -1957,7 +1975,7 @@ static inline int locks_verify_truncate(struct inode *inode,
                                    struct file *filp,
                                    loff_t size)
 {
-       if (inode->i_flock && mandatory_lock(inode))
+       if (inode->i_flctx && mandatory_lock(inode))
                return locks_mandatory_area(
                        FLOCK_VERIFY_WRITE, inode, filp,
                        size < inode->i_size ? size : inode->i_size,
@@ -1971,11 +1989,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 {
        /*
         * Since this check is lockless, we must ensure that any refcounts
-        * taken are done before checking inode->i_flock. Otherwise, we could
-        * end up racing with tasks trying to set a new lease on this file.
+        * taken are done before checking i_flctx->flc_lease. Otherwise, we
+        * could end up racing with tasks trying to set a new lease on this
+        * file.
         */
        smp_mb();
-       if (inode->i_flock)
+       if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
                return __break_lease(inode, mode, FL_LEASE);
        return 0;
 }
@@ -1984,11 +2003,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
        /*
         * Since this check is lockless, we must ensure that any refcounts
-        * taken are done before checking inode->i_flock. Otherwise, we could
-        * end up racing with tasks trying to set a new lease on this file.
+        * taken are done before checking i_flctx->flc_lease. Otherwise, we
+        * could end up racing with tasks trying to set a new lease on this
+        * file.
         */
        smp_mb();
-       if (inode->i_flock)
+       if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
                return __break_lease(inode, mode, FL_DELEG);
        return 0;
 }
index 31229e0..d326152 100644 (file)
@@ -956,15 +956,6 @@ void __log_wait_for_space(journal_t *journal);
 extern void    __journal_drop_transaction(journal_t *, transaction_t *);
 extern int     cleanup_journal_tail(journal_t *);
 
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do {                                                                      \
-       printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
-       current->state = TASK_UNINTERRUPTIBLE;                             \
-       schedule();                                                        \
-} while (1)
-
 /*
  * is_journal_abort
  *
index 704b9a5..20e7f78 100644 (file)
@@ -1251,15 +1251,6 @@ void __jbd2_log_wait_for_space(journal_t *journal);
 extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
 extern int jbd2_cleanup_journal_tail(journal_t *);
 
-/* Debugging code only: */
-
-#define jbd_ENOSYS() \
-do {                                                                      \
-       printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
-       current->state = TASK_UNINTERRUPTIBLE;                             \
-       schedule();                                                        \
-} while (1)
-
 /*
  * is_journal_abort
  *
index a0da685..65db4ae 100644 (file)
@@ -284,6 +284,13 @@ struct vm_operations_struct {
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
 #endif
+       /*
+        * Called by vm_normal_page() for special PTEs to find the
+        * page for @addr.  This is useful if the default behavior
+        * (using pte_page()) would not find the correct page.
+        */
+       struct page *(*find_special_page)(struct vm_area_struct *vma,
+                                         unsigned long addr);
 };
 
 struct mmu_gather;
index e1f5fcd..5ed7bda 100644 (file)
@@ -121,8 +121,12 @@ enum pageflags {
        PG_fscache = PG_private_2,      /* page backed by cache */
 
        /* XEN */
+       /* Pinned in Xen as a read-only pagetable page. */
        PG_pinned = PG_owner_priv_1,
+       /* Pinned as part of domain save (see xen_mm_pin_all()). */
        PG_savepinned = PG_dirty,
+       /* Has a grant mapping of another (foreign) domain's page. */
+       PG_foreign = PG_owner_priv_1,
 
        /* SLOB */
        PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
 PAGEFLAG(Checked, checked)             /* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)    /* Xen */
 PAGEFLAG(SavePinned, savepinned);                      /* Xen */
+PAGEFLAG(Foreign, foreign);                            /* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
        __SETPAGEFLAG(SwapBacked, swapbacked)
index 9603094..211e9da 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/atomic.h>
 #include <linux/device.h>
 #include <linux/io.h>
+#include <linux/resource_ext.h>
 #include <uapi/linux/pci.h>
 
 #include <linux/pci_ids.h>
@@ -177,6 +178,8 @@ enum pci_dev_flags {
        PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
        /* Do not use bus resets for device */
        PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+       /* Do not use PM reset even if device advertises NoSoftRst- */
+       PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
 };
 
 enum pci_irq_reroute_variant {
@@ -397,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
        return (pdev->error_state != pci_channel_io_normal);
 }
 
-struct pci_host_bridge_window {
-       struct list_head list;
-       struct resource *res;           /* host bridge aperture (CPU address) */
-       resource_size_t offset;         /* bus address + offset = CPU address */
-};
-
 struct pci_host_bridge {
        struct device dev;
        struct pci_bus *bus;            /* root bus */
-       struct list_head windows;       /* pci_host_bridge_windows */
+       struct list_head windows;       /* resource_entry */
        void (*release_fn)(struct pci_host_bridge *);
        void *release_data;
 };
@@ -562,6 +559,7 @@ static inline int pcibios_err_to_errno(int err)
 /* Low-level architecture-dependent routines */
 
 struct pci_ops {
+       void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
        int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
        int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
 };
@@ -859,6 +857,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
                              int where, u16 val);
 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
                               int where, u32 val);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+                           int where, int size, u32 *val);
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+                           int where, int size, u32 val);
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+                             int where, int size, u32 *val);
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+                              int where, int size, u32 val);
+
 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
 
 static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
@@ -1850,6 +1858,8 @@ static inline void pci_set_of_node(struct pci_dev *dev) { }
 static inline void pci_release_of_node(struct pci_dev *dev) { }
 static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
 #endif  /* CONFIG_OF */
 
 #ifdef CONFIG_EEH
index 8b59763..e2f1be6 100644 (file)
@@ -597,7 +597,7 @@ struct dev_pm_info {
 
 extern void update_pm_runtime_accounting(struct device *dev);
 extern int dev_pm_get_subsys_data(struct device *dev);
-extern int dev_pm_put_subsys_data(struct device *dev);
+extern void dev_pm_put_subsys_data(struct device *dev);
 
 /*
  * Power domains provide callbacks that are executed during system suspend,
index a9edab2..080e778 100644 (file)
@@ -113,8 +113,6 @@ struct generic_pm_domain_data {
        struct pm_domain_data base;
        struct gpd_timing_data td;
        struct notifier_block nb;
-       struct mutex lock;
-       unsigned int refcount;
        int need_restore;
 };
 
@@ -140,7 +138,6 @@ extern int __pm_genpd_name_add_device(const char *domain_name,
 
 extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                                  struct device *dev);
-extern void pm_genpd_dev_need_restore(struct device *dev, bool val);
 extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                  struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_add_subdomain_names(const char *master_name,
@@ -187,7 +184,6 @@ static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 {
        return -ENOSYS;
 }
-static inline void pm_genpd_dev_need_restore(struct device *dev, bool val) {}
 static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                         struct generic_pm_domain *new_sd)
 {
index 097d7eb..d534e8e 100644 (file)
@@ -216,19 +216,21 @@ struct mem_dqinfo {
        unsigned long dqi_flags;
        unsigned int dqi_bgrace;
        unsigned int dqi_igrace;
-       qsize_t dqi_maxblimit;
-       qsize_t dqi_maxilimit;
+       qsize_t dqi_max_spc_limit;
+       qsize_t dqi_max_ino_limit;
        void *dqi_priv;
 };
 
 struct super_block;
 
-#define DQF_MASK 0xffff                /* Mask for format specific flags */
-#define DQF_GETINFO_MASK 0x1ffff       /* Mask for flags passed to userspace */
-#define DQF_SETINFO_MASK 0xffff                /* Mask for flags modifiable from userspace */
-#define DQF_SYS_FILE_B         16
-#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)     /* Quota file stored as system file */
-#define DQF_INFO_DIRTY_B       31
+/* Mask for flags passed to userspace */
+#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
+/* Mask for flags modifiable from userspace */
+#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
+
+enum {
+       DQF_INFO_DIRTY_B = DQF_PRIVATE,
+};
 #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
 
 extern void mark_info_dirty(struct super_block *sb, int type);
@@ -367,15 +369,15 @@ struct qc_dqblk {
 /* Operations handling requests from userspace */
 struct quotactl_ops {
        int (*quota_on)(struct super_block *, int, int, struct path *);
-       int (*quota_on_meta)(struct super_block *, int, int);
        int (*quota_off)(struct super_block *, int);
+       int (*quota_enable)(struct super_block *, unsigned int);
+       int (*quota_disable)(struct super_block *, unsigned int);
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
        int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
-       int (*set_xstate)(struct super_block *, unsigned int, int);
        int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
        int (*rm_xquota)(struct super_block *, unsigned int);
 };
index 29e3455..df73258 100644 (file)
@@ -166,6 +166,7 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
  */
 extern const struct dquot_operations dquot_operations;
 extern const struct quotactl_ops dquot_quotactl_ops;
+extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
 
 #else
 
@@ -386,4 +387,6 @@ static inline void dquot_release_reservation_block(struct inode *inode,
        __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
 }
 
+unsigned int qtype_enforce_flag(int type);
+
 #endif /* _LINUX_QUOTAOPS_ */
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644 (file)
index 0000000..e2bf63d
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Jiang Liu <jiang.liu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_RESOURCE_EXT_H
+#define _LINUX_RESOURCE_EXT_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+/* Represent resource window for bridge devices */
+struct resource_win {
+       struct resource res;            /* In master (CPU) address space */
+       resource_size_t offset;         /* Translation offset for bridge */
+};
+
+/*
+ * Common resource list management data structure and interfaces to support
+ * ACPI, PNP and PCI host bridge etc.
+ */
+struct resource_entry {
+       struct list_head        node;
+       struct resource         *res;   /* In master (CPU) address space */
+       resource_size_t         offset; /* Translation offset for bridge */
+       struct resource         __res;  /* Default storage for res */
+};
+
+extern struct resource_entry *
+resource_list_create_entry(struct resource *res, size_t extra_size);
+extern void resource_list_free(struct list_head *head);
+
+static inline void resource_list_add(struct resource_entry *entry,
+                                    struct list_head *head)
+{
+       list_add(&entry->node, head);
+}
+
+static inline void resource_list_add_tail(struct resource_entry *entry,
+                                         struct list_head *head)
+{
+       list_add_tail(&entry->node, head);
+}
+
+static inline void resource_list_del(struct resource_entry *entry)
+{
+       list_del(&entry->node);
+}
+
+static inline void resource_list_free_entry(struct resource_entry *entry)
+{
+       kfree(entry);
+}
+
+static inline void
+resource_list_destroy_entry(struct resource_entry *entry)
+{
+       resource_list_del(entry);
+       resource_list_free_entry(entry);
+}
+
+#define resource_list_for_each_entry(entry, list)      \
+       list_for_each_entry((entry), (list), node)
+
+#define resource_list_for_each_entry_safe(entry, tmp, list)    \
+       list_for_each_entry_safe((entry), (tmp), (list), node)
+
+#endif /* _LINUX_RESOURCE_EXT_H */
index 4a1d0cc..efe3443 100644 (file)
 #define  PCI_EXP_DEVCTL_AUX_PME        0x0400  /* Auxiliary Power PM Enable */
 #define  PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800  /* Enable No Snoop */
 #define  PCI_EXP_DEVCTL_READRQ 0x7000  /* Max_Read_Request_Size */
+#define  PCI_EXP_DEVCTL_READRQ_128B  0x0000 /* 128 Bytes */
+#define  PCI_EXP_DEVCTL_READRQ_256B  0x1000 /* 256 Bytes */
+#define  PCI_EXP_DEVCTL_READRQ_512B  0x2000 /* 512 Bytes */
+#define  PCI_EXP_DEVCTL_READRQ_1024B 0x3000 /* 1024 Bytes */
 #define  PCI_EXP_DEVCTL_BCR_FLR 0x8000  /* Bridge Configuration Retry / FLR */
 #define PCI_EXP_DEVSTA         10      /* Device Status */
 #define  PCI_EXP_DEVSTA_CED    0x0001  /* Correctable Error Detected */
index 3b6cfbe..1f49b83 100644 (file)
@@ -126,10 +126,22 @@ struct if_dqblk {
 #define IIF_FLAGS      4
 #define IIF_ALL                (IIF_BGRACE | IIF_IGRACE | IIF_FLAGS)
 
+enum {
+       DQF_ROOT_SQUASH_B = 0,
+       DQF_SYS_FILE_B = 16,
+       /* Kernel internal flags invisible to userspace */
+       DQF_PRIVATE
+};
+
+/* Root squash enabled (for v1 quota format) */
+#define DQF_ROOT_SQUASH        (1 << DQF_ROOT_SQUASH_B)
+/* Quota stored in a system file */
+#define DQF_SYS_FILE   (1 << DQF_SYS_FILE_B)
+
 struct if_dqinfo {
        __u64 dqi_bgrace;
        __u64 dqi_igrace;
-       __u32 dqi_flags;
+       __u32 dqi_flags;        /* DQF_* */
        __u32 dqi_valid;
 };
 
index 3387465..143ca5f 100644 (file)
@@ -45,6 +45,8 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/features.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
 
 #define GNTTAB_RESERVED_XENSTORE 1
 
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
        u16 count;
 };
 
+struct gntab_unmap_queue_data;
+
+typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
+
+struct gntab_unmap_queue_data
+{
+       struct delayed_work     gnttab_work;
+       void *data;
+       gnttab_unmap_refs_done  done;
+       struct gnttab_unmap_grant_ref *unmap_ops;
+       struct gnttab_unmap_grant_ref *kunmap_ops;
+       struct page **pages;
+       unsigned int count;
+       unsigned int age;
+};
+
 int gnttab_init(void);
 int gnttab_suspend(void);
 int gnttab_resume(void);
@@ -163,12 +181,17 @@ void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-                     struct gnttab_map_grant_ref *kunmap_ops,
+                     struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +205,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
 
+
+struct xen_page_foreign {
+       domid_t domid;
+       grant_ref_t gref;
+};
+
+static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
+{
+       if (!PageForeign(page))
+               return NULL;
+#if BITS_PER_LONG < 64
+       return (struct xen_page_foreign *)page->private;
+#else
+       BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
+       return (struct xen_page_foreign *)&page->private;
+#endif
+}
+
 #endif /* __ASM_GNTTAB_H__ */
index 131a6cc..6ad3d11 100644 (file)
 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
 #define XENFEAT_mmu_pt_update_preserve_ad  5
 
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits      7
+
 /* x86: Does this Xen host support the HVM callback vector type? */
 #define XENFEAT_hvm_callback_vector        8
 
index bcce564..56806bc 100644 (file)
@@ -525,6 +525,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
 #define _GNTMAP_contains_pte    (4)
 #define GNTMAP_contains_pte     (1<<_GNTMAP_contains_pte)
 
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0    (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
 /*
  * Values for error status returns. All errors are -ve.
  */
index 5f4c006..97b0df7 100644 (file)
@@ -41,6 +41,8 @@
 #include <linux/platform_device.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
@@ -182,6 +184,81 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
        c->target_value = value;
 }
 
+static inline int pm_qos_get_value(struct pm_qos_constraints *c);
+static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
+{
+       struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
+       struct pm_qos_constraints *c;
+       struct pm_qos_request *req;
+       char *type;
+       unsigned long flags;
+       int tot_reqs = 0;
+       int active_reqs = 0;
+
+       if (IS_ERR_OR_NULL(qos)) {
+               pr_err("%s: bad qos param!\n", __func__);
+               return -EINVAL;
+       }
+       c = qos->constraints;
+       if (IS_ERR_OR_NULL(c)) {
+               pr_err("%s: Bad constraints on qos?\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Lock to ensure we have a snapshot */
+       spin_lock_irqsave(&pm_qos_lock, flags);
+       if (plist_head_empty(&c->list)) {
+               seq_puts(s, "Empty!\n");
+               goto out;
+       }
+
+       switch (c->type) {
+       case PM_QOS_MIN:
+               type = "Minimum";
+               break;
+       case PM_QOS_MAX:
+               type = "Maximum";
+               break;
+       case PM_QOS_SUM:
+               type = "Sum";
+               break;
+       default:
+               type = "Unknown";
+       }
+
+       plist_for_each_entry(req, &c->list, node) {
+               char *state = "Default";
+
+               if ((req->node).prio != c->default_value) {
+                       active_reqs++;
+                       state = "Active";
+               }
+               tot_reqs++;
+               seq_printf(s, "%d: %d: %s\n", tot_reqs,
+                          (req->node).prio, state);
+       }
+
+       seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
+                  type, pm_qos_get_value(c), active_reqs, tot_reqs);
+
+out:
+       spin_unlock_irqrestore(&pm_qos_lock, flags);
+       return 0;
+}
+
+static int pm_qos_dbg_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pm_qos_dbg_show_requests,
+                          inode->i_private);
+}
+
+static const struct file_operations pm_qos_debug_fops = {
+       .open           = pm_qos_dbg_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 /**
  * pm_qos_update_target - manages the constraints list and calls the notifiers
  *  if needed
@@ -509,12 +586,17 @@ int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
 EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
 
 /* User space interface to PM QoS classes via misc devices */
-static int register_pm_qos_misc(struct pm_qos_object *qos)
+static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
 {
        qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
        qos->pm_qos_power_miscdev.name = qos->name;
        qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
 
+       if (d) {
+               (void)debugfs_create_file(qos->name, S_IRUGO, d,
+                                         (void *)qos, &pm_qos_debug_fops);
+       }
+
        return misc_register(&qos->pm_qos_power_miscdev);
 }
 
@@ -608,11 +690,16 @@ static int __init pm_qos_power_init(void)
 {
        int ret = 0;
        int i;
+       struct dentry *d;
 
        BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
 
+       d = debugfs_create_dir("pm_qos", NULL);
+       if (IS_ERR_OR_NULL(d))
+               d = NULL;
+
        for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
-               ret = register_pm_qos_misc(pm_qos_array[i]);
+               ret = register_pm_qos_misc(pm_qos_array[i], d);
                if (ret < 0) {
                        printk(KERN_ERR "pm_qos_param: %s setup failed\n",
                               pm_qos_array[i]->name);
index 0c40c16..c24d5a2 100644 (file)
@@ -1472,9 +1472,9 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
 /**
  * free_unnecessary_pages - Release preallocated pages not needed for the image
  */
-static void free_unnecessary_pages(void)
+static unsigned long free_unnecessary_pages(void)
 {
-       unsigned long save, to_free_normal, to_free_highmem;
+       unsigned long save, to_free_normal, to_free_highmem, free;
 
        save = count_data_pages();
        if (alloc_normal >= save) {
@@ -1495,6 +1495,7 @@ static void free_unnecessary_pages(void)
                else
                        to_free_normal = 0;
        }
+       free = to_free_normal + to_free_highmem;
 
        memory_bm_position_reset(&copy_bm);
 
@@ -1518,6 +1519,8 @@ static void free_unnecessary_pages(void)
                swsusp_unset_page_free(page);
                __free_page(page);
        }
+
+       return free;
 }
 
 /**
@@ -1707,7 +1710,7 @@ int hibernate_preallocate_memory(void)
         * pages in memory, but we have allocated more.  Release the excessive
         * ones now.
         */
-       free_unnecessary_pages();
+       pages -= free_unnecessary_pages();
 
  out:
        stop = ktime_get();
@@ -2310,8 +2313,6 @@ static inline void free_highmem_data(void)
                free_image_page(buffer, PG_UNSAFE_CLEAR);
 }
 #else
-static inline int get_safe_write_buffer(void) { return 0; }
-
 static unsigned int
 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
 
index 0bcebff..19f2357 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/device.h>
 #include <linux/pfn.h>
 #include <linux/mm.h>
+#include <linux/resource_ext.h>
 #include <asm/io.h>
 
 
@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr)
        return err;
 }
 
+struct resource_entry *resource_list_create_entry(struct resource *res,
+                                                 size_t extra_size)
+{
+       struct resource_entry *entry;
+
+       entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
+       if (entry) {
+               INIT_LIST_HEAD(&entry->node);
+               entry->res = res ? res : &entry->__res;
+       }
+
+       return entry;
+}
+EXPORT_SYMBOL(resource_list_create_entry);
+
+void resource_list_free(struct list_head *head)
+{
+       struct resource_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, head, node)
+               resource_list_destroy_entry(entry);
+}
+EXPORT_SYMBOL(resource_list_free);
+
 static int __init strict_iomem(char *str)
 {
        if (strstr(str, "relaxed"))
index 1c71382..eb4220a 100644 (file)
@@ -13,5 +13,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/power.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
 
index 988d309..d63849b 100644 (file)
@@ -754,6 +754,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
        if (HAVE_PTE_SPECIAL) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
+               if (vma->vm_ops && vma->vm_ops->find_special_page)
+                       return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (!is_zero_pfn(pfn))
index 13c2144..1c56bf5 100644 (file)
@@ -851,7 +851,7 @@ static int __init AtaIrqInit(void)
        st_mfp.tim_dt_a = 1;    /* Cause interrupt after first event. */
        st_mfp.tim_ct_a = 8;    /* Turn on event counting. */
        /* Register interrupt handler. */
-       if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, IRQ_TYPE_SLOW, "DMA sound",
+       if (request_irq(IRQ_MFP_TIMA, AtaInterrupt, 0, "DMA sound",
                        AtaInterrupt))
                return 0;
        st_mfp.int_en_a |= 0x20;        /* Turn interrupt on. */
index f4b9533..eec6880 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2f0f34a..5da129e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c13ff9c..b51e40a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0dc2485..92f1fd7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 733f9e4..e153fcb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 99b47b6..3853a73 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7ccb073..6858c08 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a2d37d6..84bdef0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 24d3296..c736adf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d470046..8f2fe16 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 853b4da..d0ba653 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2014, Intel Corp.
+ * Copyright (C) 2000 - 2015, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2e2ba2e..3ed7c04 100644 (file)
@@ -209,7 +209,7 @@ $(OUTPUT)%.o: %.c
 
 $(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
        $(ECHO) "  CC      " $@
-       $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -lrt -lpci -L$(OUTPUT) -o $@
+       $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lcpupower -Wl,-rpath=./ -lrt -lpci -L$(OUTPUT) -o $@
        $(QUIET) $(STRIPCMD) $@
 
 $(OUTPUT)po/$(PACKAGE).pot: $(UTIL_SRC)
index 56bfb52..9b95069 100644 (file)
@@ -12,16 +12,16 @@ turbostat \- Report processor frequency and idle statistics
 .RB [ "\-i interval_sec" ]
 .SH DESCRIPTION
 \fBturbostat \fP reports processor topology, frequency,
-idle power-state statistics, temperature and power on modern X86 processors.
-Either \fBcommand\fP is forked and statistics are printed
-upon its completion, or statistics are printed periodically.
-
-\fBturbostat \fP
-must be run on root, and
-minimally requires that the processor
-supports an "invariant" TSC, plus the APERF and MPERF MSRs.
-Additional information is reported depending on hardware counter support.
-
+idle power-state statistics, temperature and power on X86 processors.
+There are two ways to invoke turbostat.
+The first method is to supply a
+\fBcommand\fP, which is forked and statistics are printed
+upon its completion.
+The second method is to omit the command,
+and turbostat will print statistics every 5 seconds.
+The 5-second interval can be changed using the -i option.
+
+Some information is not available on older processors.
 .SS Options
 The \fB-p\fP option limits output to the 1st thread in 1st core of each package.
 .PP
@@ -130,12 +130,13 @@ cpu3: MSR_IA32_THERM_STATUS: 0x884e0000 (27 C +/- 1)
  ...
 .fi
 The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
-available at the minimum package voltage.  The \fBTSC frequency\fP is the nominal
-maximum frequency of the processor if turbo-mode were not available.  This frequency
+available at the minimum package voltage.  The \fBTSC frequency\fP is the base
+frequency of the processor -- this should match the brand string
+in /proc/cpuinfo.  This base frequency
 should be sustainable on all CPUs indefinitely, given nominal power and cooling.
 The remaining rows show what maximum turbo frequency is possible
-depending on the number of idle cores.  Note that this information is
-not available on all processors.
+depending on the number of idle cores.  Note that not all information is
+available on all processors.
 .SH FORK EXAMPLE
 If turbostat is invoked with a command, it will fork that command
 and output the statistics gathered when the command exits.
@@ -176,6 +177,11 @@ not including any non-busy idle time.
 
 .B "turbostat "
 must be run as root.
+Alternatively, non-root users can be enabled to run turbostat this way:
+
+# setcap cap_sys_rawio=ep ./turbostat
+
+# chmod +r /dev/cpu/*/msr
 
 .B "turbostat "
 reads hardware counters, but doesn't write them.
@@ -184,15 +190,33 @@ multiple invocations of itself.
 
 \fBturbostat \fP
 may work poorly on Linux-2.6.20 through 2.6.29,
-as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF
+as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
 in those kernels.
 
-If the TSC column does not make sense, then
-the other numbers will also make no sense.
-Turbostat is lightweight, and its data collection is not atomic.
-These issues are usually caused by an extremely short measurement
-interval (much less than 1 second), or system activity that prevents
-turbostat from being able to run on all CPUS to quickly collect data.
+AVG_MHz = APERF_delta/measurement_interval.  This is the actual
+number of elapsed cycles divided by the entire sample interval --
+including idle time.  Note that this calculation is resilient
+to systems lacking a non-stop TSC.
+
+TSC_MHz = TSC_delta/measurement_interval.
+On a system with an invariant TSC, this value will be constant
+and will closely match the base frequency value shown
+in the brand string in /proc/cpuinfo.  On a system where
+the TSC stops in idle, TSC_MHz will drop
+below the processor's base frequency.
+
+%Busy = MPERF_delta/TSC_delta
+
+Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval
+
+Note that these calculations depend on TSC_delta, so they
+are not reliable during intervals when TSC_MHz is not running at the base frequency.
+
+Turbostat data collection is not atomic.
+Extremely short measurement intervals (much less than 1 second),
+or system activity that prevents turbostat from being able
+to run on all CPUS to quickly collect data, will result in
+inconsistent results.
 
 The APERF, MPERF MSRs are defined to count non-halted cycles.
 Although it is not guaranteed by the architecture, turbostat assumes
index 5b1b807..a02c02f 100644 (file)
@@ -38,6 +38,8 @@
 #include <ctype.h>
 #include <sched.h>
 #include <cpuid.h>
+#include <linux/capability.h>
+#include <errno.h>
 
 char *proc_stat = "/proc/stat";
 unsigned int interval_sec = 5; /* set with -i interval_sec */
@@ -59,8 +61,8 @@ unsigned int has_epb;
 unsigned int units = 1000000;  /* MHz etc */
 unsigned int genuine_intel;
 unsigned int has_invariant_tsc;
-unsigned int do_nehalem_platform_info;
-unsigned int do_nehalem_turbo_ratio_limit;
+unsigned int do_nhm_platform_info;
+unsigned int do_nhm_turbo_ratio_limit;
 unsigned int do_ivt_turbo_ratio_limit;
 unsigned int extra_msr_offset32;
 unsigned int extra_msr_offset64;
@@ -81,6 +83,9 @@ unsigned int tcc_activation_temp;
 unsigned int tcc_activation_temp_override;
 double rapl_power_units, rapl_energy_units, rapl_time_units;
 double rapl_joule_counter_range;
+unsigned int do_core_perf_limit_reasons;
+unsigned int do_gfx_perf_limit_reasons;
+unsigned int do_ring_perf_limit_reasons;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -251,15 +256,13 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
        sprintf(pathname, "/dev/cpu/%d/msr", cpu);
        fd = open(pathname, O_RDONLY);
        if (fd < 0)
-               return -1;
+               err(-1, "%s open failed, try chown or chmod +r /dev/cpu/*/msr, or run as root", pathname);
 
        retval = pread(fd, msr, sizeof *msr, offset);
        close(fd);
 
-       if (retval != sizeof *msr) {
-               fprintf(stderr, "%s offset 0x%llx read failed\n", pathname, (unsigned long long)offset);
-               return -1;
-       }
+       if (retval != sizeof *msr)
+               err(-1, "%s offset 0x%llx read failed", pathname, (unsigned long long)offset);
 
        return 0;
 }
@@ -281,7 +284,7 @@ void print_header(void)
                outp += sprintf(outp, "     CPU");
        if (has_aperf)
                outp += sprintf(outp, " Avg_MHz");
-       if (do_nhm_cstates)
+       if (has_aperf)
                outp += sprintf(outp, "   %%Busy");
        if (has_aperf)
                outp += sprintf(outp, " Bzy_MHz");
@@ -337,7 +340,7 @@ void print_header(void)
                        outp += sprintf(outp, "   PKG_%%");
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
                        outp += sprintf(outp, "   RAM_%%");
-       } else {
+       } else if (do_rapl && rapl_joules) {
                if (do_rapl & RAPL_PKG)
                        outp += sprintf(outp, "   Pkg_J");
                if (do_rapl & RAPL_CORES)
@@ -457,25 +460,25 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, "%8d", t->cpu_id);
        }
 
-       /* AvgMHz */
+       /* Avg_MHz */
        if (has_aperf)
                outp += sprintf(outp, "%8.0f",
                        1.0 / units * t->aperf / interval_float);
 
-       /* %c0 */
-       if (do_nhm_cstates) {
+       /* %Busy */
+       if (has_aperf) {
                if (!skip_c0)
                        outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
                else
                        outp += sprintf(outp, "********");
        }
 
-       /* BzyMHz */
+       /* Bzy_MHz */
        if (has_aperf)
                outp += sprintf(outp, "%8.0f",
                        1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
 
-       /* TSC */
+       /* TSC_MHz */
        outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
 
        /* SMI */
@@ -561,7 +564,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
                        outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
-       } else {
+       } else if (do_rapl && rapl_joules) {
                if (do_rapl & RAPL_PKG)
                        outp += sprintf(outp, fmt8,
                                        p->energy_pkg * rapl_energy_units);
@@ -578,8 +581,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
                        outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
-       outp += sprintf(outp, fmt8, interval_float);
 
+               outp += sprintf(outp, fmt8, interval_float);
        }
 done:
        outp += sprintf(outp, "\n");
@@ -670,24 +673,26 @@ delta_thread(struct thread_data *new, struct thread_data *old,
 
        old->c1 = new->c1 - old->c1;
 
-       if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
-               old->aperf = new->aperf - old->aperf;
-               old->mperf = new->mperf - old->mperf;
-       } else {
+       if (has_aperf) {
+               if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
+                       old->aperf = new->aperf - old->aperf;
+                       old->mperf = new->mperf - old->mperf;
+               } else {
 
-               if (!aperf_mperf_unstable) {
-                       fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
-                       fprintf(stderr, "* Frequency results do not cover entire interval *\n");
-                       fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+                       if (!aperf_mperf_unstable) {
+                               fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+                               fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+                               fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
 
-                       aperf_mperf_unstable = 1;
+                               aperf_mperf_unstable = 1;
+                       }
+                       /*
+                        * mperf delta is likely a huge "positive" number
+                        * can not use it for calculating c0 time
+                        */
+                       skip_c0 = 1;
+                       skip_c1 = 1;
                }
-               /*
-                * mperf delta is likely a huge "positive" number
-                * can not use it for calculating c0 time
-                */
-               skip_c0 = 1;
-               skip_c1 = 1;
        }
 
 
@@ -1019,7 +1024,7 @@ void print_verbose_header(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       if (!do_nehalem_platform_info)
+       if (!do_nhm_platform_info)
                return;
 
        get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
@@ -1132,7 +1137,7 @@ print_nhm_turbo_ratio_limits:
        }
        fprintf(stderr, ")\n");
 
-       if (!do_nehalem_turbo_ratio_limit)
+       if (!do_nhm_turbo_ratio_limit)
                return;
 
        get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
@@ -1178,6 +1183,7 @@ print_nhm_turbo_ratio_limits:
        if (ratio)
                fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
                        ratio, bclk, ratio * bclk);
+
 }
 
 void free_all_buffers(void)
@@ -1458,17 +1464,60 @@ void check_dev_msr()
        struct stat sb;
 
        if (stat("/dev/cpu/0/msr", &sb))
-               err(-5, "no /dev/cpu/0/msr\n"
-                   "Try \"# modprobe msr\"");
+               err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
 
-void check_super_user()
+void check_permissions()
 {
-       if (getuid() != 0)
-               errx(-6, "must be root");
+       struct __user_cap_header_struct cap_header_data;
+       cap_user_header_t cap_header = &cap_header_data;
+       struct __user_cap_data_struct cap_data_data;
+       cap_user_data_t cap_data = &cap_data_data;
+       extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
+       int do_exit = 0;
+
+       /* check for CAP_SYS_RAWIO */
+       cap_header->pid = getpid();
+       cap_header->version = _LINUX_CAPABILITY_VERSION;
+       if (capget(cap_header, cap_data) < 0)
+               err(-6, "capget(2) failed");
+
+       if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
+               do_exit++;
+               warnx("capget(CAP_SYS_RAWIO) failed,"
+                       " try \"# setcap cap_sys_rawio=ep %s\"", progname);
+       }
+
+       /* test file permissions */
+       if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+               do_exit++;
+               warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
+       }
+
+       /* if all else fails, tell them to be root */
+       if (do_exit)
+               if (getuid() != 0)
+                       warnx("... or simply run as root");
+
+       if (do_exit)
+               exit(-6);
 }
 
-int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
+/*
+ * NHM adds support for additional MSRs:
+ *
+ * MSR_SMI_COUNT                   0x00000034
+ *
+ * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
+ *
+ * MSR_PKG_C3_RESIDENCY            0x000003f8
+ * MSR_PKG_C6_RESIDENCY            0x000003f9
+ * MSR_CORE_C3_RESIDENCY           0x000003fc
+ * MSR_CORE_C6_RESIDENCY           0x000003fd
+ *
+ */
+int has_nhm_msrs(unsigned int family, unsigned int model)
 {
        if (!genuine_intel)
                return 0;
@@ -1495,13 +1544,27 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x3D:      /* BDW */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
-               return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
+               return 1;
        default:
                return 0;
        }
 }
+int has_nhm_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+       if (!has_nhm_msrs(family, model))
+               return 0;
+
+       switch (model) {
+       /* Nehalem compatible, but do not include turbo-ratio limit support */
+       case 0x2E:      /* Nehalem-EX Xeon - Beckton */
+       case 0x2F:      /* Westmere-EX Xeon - Eagleton */
+               return 0;
+       default:
+               return 1;
+       }
+}
 int has_ivt_turbo_ratio_limit(unsigned int family, unsigned int model)
 {
        if (!genuine_intel)
@@ -1564,6 +1627,103 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        return 0;
 }
 
+/*
+ * print_perf_limit()
+ */
+int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+       unsigned long long msr;
+       int cpu;
+
+       cpu = t->cpu_id;
+
+       /* per-package */
+       if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+               return 0;
+
+       if (cpu_migrate(cpu)) {
+               fprintf(stderr, "Could not migrate to CPU %d\n", cpu);
+               return -1;
+       }
+
+       if (do_core_perf_limit_reasons) {
+               get_msr(cpu, MSR_CORE_PERF_LIMIT_REASONS, &msr);
+               fprintf(stderr, "cpu%d: MSR_CORE_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+               fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)",
+                       (msr & 1 << 0) ? "PROCHOT, " : "",
+                       (msr & 1 << 1) ? "ThermStatus, " : "",
+                       (msr & 1 << 2) ? "bit2, " : "",
+                       (msr & 1 << 4) ? "Graphics, " : "",
+                       (msr & 1 << 5) ? "Auto-HWP, " : "",
+                       (msr & 1 << 6) ? "VR-Therm, " : "",
+                       (msr & 1 << 8) ? "Amps, " : "",
+                       (msr & 1 << 9) ? "CorePwr, " : "",
+                       (msr & 1 << 10) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 11) ? "PkgPwrL2, " : "",
+                       (msr & 1 << 12) ? "MultiCoreTurbo, " : "",
+                       (msr & 1 << 13) ? "Transitions, " : "",
+                       (msr & 1 << 14) ? "bit14, " : "",
+                       (msr & 1 << 15) ? "bit15, " : "");
+               fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
+                       (msr & 1 << 16) ? "PROCHOT, " : "",
+                       (msr & 1 << 17) ? "ThermStatus, " : "",
+                       (msr & 1 << 18) ? "bit18, " : "",
+                       (msr & 1 << 20) ? "Graphics, " : "",
+                       (msr & 1 << 21) ? "Auto-HWP, " : "",
+                       (msr & 1 << 22) ? "VR-Therm, " : "",
+                       (msr & 1 << 24) ? "Amps, " : "",
+                       (msr & 1 << 25) ? "CorePwr, " : "",
+                       (msr & 1 << 26) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 27) ? "PkgPwrL2, " : "",
+                       (msr & 1 << 28) ? "MultiCoreTurbo, " : "",
+                       (msr & 1 << 29) ? "Transitions, " : "",
+                       (msr & 1 << 30) ? "bit30, " : "",
+                       (msr & 1 << 31) ? "bit31, " : "");
+
+       }
+       if (do_gfx_perf_limit_reasons) {
+               get_msr(cpu, MSR_GFX_PERF_LIMIT_REASONS, &msr);
+               fprintf(stderr, "cpu%d: MSR_GFX_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+               fprintf(stderr, " (Active: %s%s%s%s%s%s%s%s)",
+                       (msr & 1 << 0) ? "PROCHOT, " : "",
+                       (msr & 1 << 1) ? "ThermStatus, " : "",
+                       (msr & 1 << 4) ? "Graphics, " : "",
+                       (msr & 1 << 6) ? "VR-Therm, " : "",
+                       (msr & 1 << 8) ? "Amps, " : "",
+                       (msr & 1 << 9) ? "GFXPwr, " : "",
+                       (msr & 1 << 10) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 11) ? "PkgPwrL2, " : "");
+               fprintf(stderr, " (Logged: %s%s%s%s%s%s%s%s)\n",
+                       (msr & 1 << 16) ? "PROCHOT, " : "",
+                       (msr & 1 << 17) ? "ThermStatus, " : "",
+                       (msr & 1 << 20) ? "Graphics, " : "",
+                       (msr & 1 << 22) ? "VR-Therm, " : "",
+                       (msr & 1 << 24) ? "Amps, " : "",
+                       (msr & 1 << 25) ? "GFXPwr, " : "",
+                       (msr & 1 << 26) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 27) ? "PkgPwrL2, " : "");
+       }
+       if (do_ring_perf_limit_reasons) {
+               get_msr(cpu, MSR_RING_PERF_LIMIT_REASONS, &msr);
+               fprintf(stderr, "cpu%d: MSR_RING_PERF_LIMIT_REASONS, 0x%08llx", cpu, msr);
+               fprintf(stderr, " (Active: %s%s%s%s%s%s)",
+                       (msr & 1 << 0) ? "PROCHOT, " : "",
+                       (msr & 1 << 1) ? "ThermStatus, " : "",
+                       (msr & 1 << 6) ? "VR-Therm, " : "",
+                       (msr & 1 << 8) ? "Amps, " : "",
+                       (msr & 1 << 10) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 11) ? "PkgPwrL2, " : "");
+               fprintf(stderr, " (Logged: %s%s%s%s%s%s)\n",
+                       (msr & 1 << 16) ? "PROCHOT, " : "",
+                       (msr & 1 << 17) ? "ThermStatus, " : "",
+                       (msr & 1 << 22) ? "VR-Therm, " : "",
+                       (msr & 1 << 24) ? "Amps, " : "",
+                       (msr & 1 << 26) ? "PkgPwrL1, " : "",
+                       (msr & 1 << 27) ? "PkgPwrL2, " : "");
+       }
+       return 0;
+}
+
 #define        RAPL_POWER_GRANULARITY  0x7FFF  /* 15 bit power granularity */
 #define        RAPL_TIME_GRANULARITY   0x3F /* 6 bit time granularity */
 
@@ -1653,6 +1813,27 @@ void rapl_probe(unsigned int family, unsigned int model)
        return;
 }
 
+void perf_limit_reasons_probe(family, model)
+{
+       if (!genuine_intel)
+               return;
+
+       if (family != 6)
+               return;
+
+       switch (model) {
+       case 0x3C:      /* HSW */
+       case 0x45:      /* HSW */
+       case 0x46:      /* HSW */
+               do_gfx_perf_limit_reasons = 1;
+       case 0x3F:      /* HSX */
+               do_core_perf_limit_reasons = 1;
+               do_ring_perf_limit_reasons = 1;
+       default:
+               return;
+       }
+}
+
 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
@@ -1842,8 +2023,15 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        return 0;
 }
 
+/*
+ * SNB adds support for additional MSRs:
+ *
+ * MSR_PKG_C7_RESIDENCY            0x000003fa
+ * MSR_CORE_C7_RESIDENCY           0x000003fe
+ * MSR_PKG_C2_RESIDENCY            0x0000060d
+ */
 
-int is_snb(unsigned int family, unsigned int model)
+int has_snb_msrs(unsigned int family, unsigned int model)
 {
        if (!genuine_intel)
                return 0;
@@ -1865,7 +2053,14 @@ int is_snb(unsigned int family, unsigned int model)
        return 0;
 }
 
-int has_c8_c9_c10(unsigned int family, unsigned int model)
+/*
+ * HSW adds support for additional MSRs:
+ *
+ * MSR_PKG_C8_RESIDENCY            0x00000630
+ * MSR_PKG_C9_RESIDENCY            0x00000631
+ * MSR_PKG_C10_RESIDENCY           0x00000632
+ */
+int has_hsw_msrs(unsigned int family, unsigned int model)
 {
        if (!genuine_intel)
                return 0;
@@ -1917,7 +2112,7 @@ double slm_bclk(void)
 
 double discover_bclk(unsigned int family, unsigned int model)
 {
-       if (is_snb(family, model))
+       if (has_snb_msrs(family, model))
                return 100.00;
        else if (is_slm(family, model))
                return slm_bclk();
@@ -1965,7 +2160,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
        }
 
        /* Temperature Target MSR is Nehalem and newer only */
-       if (!do_nehalem_platform_info)
+       if (!do_nhm_platform_info)
                goto guess;
 
        if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
@@ -2029,18 +2224,15 @@ void check_cpuid()
        ebx = ecx = edx = 0;
        __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
 
-       if (max_level < 0x80000007)
-               errx(1, "CPUID: no invariant TSC (max_level 0x%x)", max_level);
+       if (max_level >= 0x80000007) {
 
-       /*
-        * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
-        * this check is valid for both Intel and AMD
-        */
-       __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
-       has_invariant_tsc = edx & (1 << 8);
-
-       if (!has_invariant_tsc)
-               errx(1, "No invariant TSC");
+               /*
+                * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+                * this check is valid for both Intel and AMD
+                */
+               __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+               has_invariant_tsc = edx & (1 << 8);
+       }
 
        /*
         * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
@@ -2054,26 +2246,22 @@ void check_cpuid()
        has_epb = ecx & (1 << 3);
 
        if (verbose)
-               fprintf(stderr, "CPUID(6): %s%s%s%s\n",
-                       has_aperf ? "APERF" : "No APERF!",
-                       do_dts ? ", DTS" : "",
-                       do_ptm ? ", PTM": "",
-                       has_epb ? ", EPB": "");
-
-       if (!has_aperf)
-               errx(-1, "No APERF");
-
-       do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
-       do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
-       do_smi = do_nhm_cstates;
-       do_snb_cstates = is_snb(family, model);
-       do_c8_c9_c10 = has_c8_c9_c10(family, model);
+               fprintf(stderr, "CPUID(6): %sAPERF, %sDTS, %sPTM, %sEPB\n",
+                       has_aperf ? "" : "No ",
+                       do_dts ? "" : "No ",
+                       do_ptm ? "" : "No ",
+                       has_epb ? "" : "No ");
+
+       do_nhm_platform_info = do_nhm_cstates = do_smi = has_nhm_msrs(family, model);
+       do_snb_cstates = has_snb_msrs(family, model);
+       do_c8_c9_c10 = has_hsw_msrs(family, model);
        do_slm_cstates = is_slm(family, model);
        bclk = discover_bclk(family, model);
 
-       do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
+       do_nhm_turbo_ratio_limit = has_nhm_turbo_ratio_limit(family, model);
        do_ivt_turbo_ratio_limit = has_ivt_turbo_ratio_limit(family, model);
        rapl_probe(family, model);
+       perf_limit_reasons_probe(family, model);
 
        return;
 }
@@ -2299,10 +2487,9 @@ void setup_all_buffers(void)
 
 void turbostat_init()
 {
-       check_cpuid();
-
        check_dev_msr();
-       check_super_user();
+       check_permissions();
+       check_cpuid();
 
        setup_all_buffers();
 
@@ -2312,6 +2499,9 @@ void turbostat_init()
        if (verbose)
                for_all_cpus(print_epb, ODD_COUNTERS);
 
+       if (verbose)
+               for_all_cpus(print_perf_limit, ODD_COUNTERS);
+
        if (verbose)
                for_all_cpus(print_rapl, ODD_COUNTERS);
 
@@ -2441,7 +2631,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.7 Feb 6, 2014"
+               fprintf(stderr, "turbostat v3.9 23-Jan, 2015"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();