From: Linus Torvalds Date: Tue, 1 Jun 2010 21:12:27 +0000 (-0700) Subject: Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt... X-Git-Tag: v2.6.35-rc2~56 X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=commitdiff_plain;h=709d015bb810a3377feaee3093d110a17e919019;hp=e3a815fcd38043b8f1bb526123d8ab6ae01deb77 Merge branch 'drm-intel-next' of git://git./linux/kernel/git/anholt/drm-intel * 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (41 commits) drm/i915: add HAS_BSD check to i915_getparam drm/i915: Honor sync polarity from VBT panel timing descriptors drm/i915: Unmask interrupt for render engine on Sandybridge drm/i915: Fix PIPE_CONTROL command on Sandybridge drm/i915: Fix up address spaces in slow_kernel_write() drm/i915: Use non-atomic kmap for slow copy paths drm/i915: Avoid moving from CPU domain during pwrite drm/i915: Cleanup after failed initialization of ringbuffers drm/i915: Reject bind_to_gtt() early if object > aperture drm/i915: Check error code whilst moving buffer to GTT domain. drm/i915: Remove spurious warning "Failure to install fence" drm/i915: Rebind bo if currently bound with incorrect alignment. drm/i915: Include pitch in set_base debug statement. drm/i915: Only print "nothing to do" debug message as required. drm/i915: Propagate error from unbinding an unfenceable object. drm/i915: Avoid nesting of domain changes when setting display plane drm/i915: Hold the spinlock whilst resetting unpin_work along error path drm/i915: Only print an message if there was an error drm/i915: Clean up leftover bits from hws move to ring structure. drm/i915: Add CxSR support on Pineview DDR3 ... --- diff --git a/.gitignore b/.gitignore index a2939fc10b22..8faa6c02b39e 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ modules.builtin *.gz *.bz2 *.lzma +*.lzo *.patch *.gcno diff --git a/Documentation/.gitignore b/Documentation/.gitignore new file mode 100644 index 000000000000..bcd907b4141f --- /dev/null +++ b/Documentation/.gitignore @@ -0,0 +1,7 @@ +filesystems/dnotify_test +laptops/dslm +timers/hpet_example +vm/hugepage-mmap +vm/hugepage-shm +vm/map_hugetlb + diff --git a/Documentation/ABI/testing/sysfs-firmware-sfi b/Documentation/ABI/testing/sysfs-firmware-sfi new file mode 100644 index 000000000000..4be7d44aeacf --- /dev/null +++ b/Documentation/ABI/testing/sysfs-firmware-sfi @@ -0,0 +1,15 @@ +What: /sys/firmware/sfi/tables/ +Date: May 2010 +Contact: Len Brown +Description: + SFI defines a number of small static memory tables + so the kernel can get platform information from firmware. + + The tables are defined in the latest SFI specification: + http://simplefirmware.org/documentation + + While the tables are used by the kernel, user-space + can observe them this way: + + # cd /sys/firmware/sfi/tables + # cat $TABLENAME > $TABLENAME.bin diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 2e435adfbd6b..98ce51796f71 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -639,6 +639,36 @@ is planned to completely remove virt_to_bus() and bus_to_virt() as they are entirely deprecated. Some ports already do not provide these as it is impossible to correctly support them. 
+ Handling Errors + +DMA address space is limited on some architectures and an allocation +failure can be determined by: + +- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0 + +- checking the returned dma_addr_t of dma_map_single and dma_map_page + by using dma_mapping_error(): + + dma_addr_t dma_handle; + + dma_handle = dma_map_single(dev, addr, size, direction); + if (dma_mapping_error(dev, dma_handle)) { + /* + * reduce current DMA mapping usage, + * delay and try again later or + * reset driver. + */ + } + +Networking drivers must call dev_kfree_skb to free the socket buffer +and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook +(ndo_start_xmit). This means that the socket buffer is just dropped in +the failure case. + +SCSI drivers must return SCSI_MLQUEUE_HOST_BUSY if the DMA mapping +fails in the queuecommand hook. This means that the SCSI subsystem +passes the command to the driver again later. + Optimizing Unmap State Space Consumption On many platforms, dma_unmap_{single,page}() is simply a nop. @@ -703,42 +733,25 @@ to "Closing". 1) Struct scatterlist requirements. - Struct scatterlist must contain, at a minimum, the following - members: - - struct page *page; - unsigned int offset; - unsigned int length; - - The base address is specified by a "page+offset" pair. - - Previous versions of struct scatterlist contained a "void *address" - field that was sometimes used instead of page+offset. As of Linux - 2.5., page+offset is always used, and the "address" field has been - deleted. - -2) More to come... - - Handling Errors - -DMA address space is limited on some architectures and an allocation -failure can be determined by: - -- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0 - -- checking the returned dma_addr_t of dma_map_single and dma_map_page - by using dma_mapping_error(): - - dma_addr_t dma_handle; - - dma_handle = dma_map_single(dev, addr, size, direction); - if (dma_mapping_error(dev, dma_handle)) { - /* - * reduce current DMA mapping usage, - * delay and try again later or - * reset driver. - */ - } + Don't invent the architecture specific struct scatterlist; just use + . You need to enable + CONFIG_NEED_SG_DMA_LENGTH if the architecture supports IOMMUs + (including software IOMMU). + +2) ARCH_KMALLOC_MINALIGN + + Architectures must ensure that kmalloc'ed buffer is + DMA-safe. Drivers and subsystems depend on it. If an architecture + isn't fully DMA-coherent (i.e. hardware doesn't ensure that data in + the CPU cache is identical to data in main memory), + ARCH_KMALLOC_MINALIGN must be set so that the memory allocator + makes sure that kmalloc'ed buffer doesn't share a cache line with + the others. See arch/arm/include/asm/cache.h as an example. + + Note that ARCH_KMALLOC_MINALIGN is about DMA memory alignment + constraints. You don't need to worry about the architecture data + alignment constraints (e.g. the alignment constraints about 64-bit + objects). Closing diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers index 99e72a81fa2f..4947fd8fb182 100644 --- a/Documentation/SubmittingDrivers +++ b/Documentation/SubmittingDrivers @@ -130,6 +130,8 @@ Linux kernel master tree: ftp.??.kernel.org:/pub/linux/kernel/... ?? == your country code, such as "us", "uk", "fr", etc. 
+ http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git + Linux kernel mailing list: linux-kernel@vger.kernel.org [mail majordomo@vger.kernel.org to subscribe] @@ -160,3 +162,6 @@ How to NOT write kernel driver by Arjan van de Ven: Kernel Janitor: http://janitor.kernelnewbies.org/ + +GIT, Fast Version Control System: + http://git-scm.com/ diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt new file mode 100644 index 000000000000..dfab71848dc8 --- /dev/null +++ b/Documentation/acpi/apei/einj.txt @@ -0,0 +1,59 @@ + APEI Error INJection + ~~~~~~~~~~~~~~~~~~~~ + +EINJ provides a hardware error injection mechanism +It is very useful for debugging and testing of other APEI and RAS features. + +To use EINJ, make sure the following are enabled in your kernel +configuration: + +CONFIG_DEBUG_FS +CONFIG_ACPI_APEI +CONFIG_ACPI_APEI_EINJ + +The user interface of EINJ is debug file system, under the +directory apei/einj. The following files are provided. + +- available_error_type + Reading this file returns the error injection capability of the + platform, that is, which error types are supported. The error type + definition is as follow, the left field is the error type value, the + right field is error description. + + 0x00000001 Processor Correctable + 0x00000002 Processor Uncorrectable non-fatal + 0x00000004 Processor Uncorrectable fatal + 0x00000008 Memory Correctable + 0x00000010 Memory Uncorrectable non-fatal + 0x00000020 Memory Uncorrectable fatal + 0x00000040 PCI Express Correctable + 0x00000080 PCI Express Uncorrectable fatal + 0x00000100 PCI Express Uncorrectable non-fatal + 0x00000200 Platform Correctable + 0x00000400 Platform Uncorrectable non-fatal + 0x00000800 Platform Uncorrectable fatal + + The format of file contents are as above, except there are only the + available error type lines. + +- error_type + This file is used to set the error type value. The error type value + is defined in "available_error_type" description. + +- error_inject + Write any integer to this file to trigger the error + injection. Before this, please specify all necessary error + parameters. + +- param1 + This file is used to set the first error parameter value. Effect of + parameter depends on error_type specified. For memory error, this is + physical memory address. + +- param2 + This file is used to set the second error parameter value. Effect of + parameter depends on error_type specified. For memory error, this is + physical memory address mask. + +For more information about EINJ, please refer to ACPI specification +version 4.0, section 17.5. diff --git a/Documentation/arm/Samsung-S3C24XX/GPIO.txt b/Documentation/arm/Samsung-S3C24XX/GPIO.txt index 2af2cf39915f..816d6071669e 100644 --- a/Documentation/arm/Samsung-S3C24XX/GPIO.txt +++ b/Documentation/arm/Samsung-S3C24XX/GPIO.txt @@ -12,6 +12,8 @@ Introduction of the s3c2410 GPIO system, please read the Samsung provided data-sheet/users manual to find out the complete list. + See Documentation/arm/Samsung/GPIO.txt for the core implemetation. + GPIOLIB ------- @@ -24,8 +26,60 @@ GPIOLIB listed below will be removed (they may be marked as __deprecated in the near future). - - s3c2410_gpio_getpin - - s3c2410_gpio_setpin + The following functions now either have a s3c_ specific variant + or are merged into gpiolib. 
See the definitions in + arch/arm/plat-samsung/include/plat/gpio-cfg.h: + + s3c2410_gpio_setpin() gpio_set_value() or gpio_direction_output() + s3c2410_gpio_getpin() gpio_get_value() or gpio_direction_input() + s3c2410_gpio_getirq() gpio_to_irq() + s3c2410_gpio_cfgpin() s3c_gpio_cfgpin() + s3c2410_gpio_getcfg() s3c_gpio_getcfg() + s3c2410_gpio_pullup() s3c_gpio_setpull() + + +GPIOLIB conversion +------------------ + +If you need to convert your board or driver to use gpiolib from the exiting +s3c2410 api, then here are some notes on the process. + +1) If your board is exclusively using an GPIO, say to control peripheral + power, then it will require to claim the gpio with gpio_request() before + it can use it. + + It is recommended to check the return value, with at least WARN_ON() + during initialisation. + +2) The s3c2410_gpio_cfgpin() can be directly replaced with s3c_gpio_cfgpin() + as they have the same arguments, and can either take the pin specific + values, or the more generic special-function-number arguments. + +3) s3c2410_gpio_pullup() changs have the problem that whilst the + s3c2410_gpio_pullup(x, 1) can be easily translated to the + s3c_gpio_setpull(x, S3C_GPIO_PULL_NONE), the s3c2410_gpio_pullup(x, 0) + are not so easy. + + The s3c2410_gpio_pullup(x, 0) case enables the pull-up (or in the case + of some of the devices, a pull-down) and as such the new API distinguishes + between the UP and DOWN case. There is currently no 'just turn on' setting + which may be required if this becomes a problem. + +4) s3c2410_gpio_setpin() can be replaced by gpio_set_value(), the old call + does not implicitly configure the relevant gpio to output. The gpio + direction should be changed before using gpio_set_value(). + +5) s3c2410_gpio_getpin() is replaceable by gpio_get_value() if the pin + has been set to input. It is currently unknown what the behaviour is + when using gpio_get_value() on an output pin (s3c2410_gpio_getpin + would return the value the pin is supposed to be outputting). + +6) s3c2410_gpio_getirq() should be directly replacable with the + gpio_to_irq() call. + +The s3c2410_gpio and gpio_ calls have always operated on the same gpio +numberspace, so there is no problem with converting the gpio numbering +between the calls. Headers @@ -54,6 +108,11 @@ PIN Numbers eg S3C2410_GPA(0) or S3C2410_GPF(1). These defines are used to tell the GPIO functions which pin is to be used. + With the conversion to gpiolib, there is no longer a direct conversion + from gpio pin number to register base address as in earlier kernels. This + is due to the number space required for newer SoCs where the later + GPIOs are not contiguous. + Configuring a pin ----------------- @@ -71,6 +130,8 @@ Configuring a pin which would turn GPA(0) into the lowest Address line A0, and set GPE(8) to be connected to the SDIO/MMC controller's SDDAT1 line. + The s3c_gpio_cfgpin() call is a functional replacement for this call. + Reading the current configuration --------------------------------- @@ -82,6 +143,9 @@ Reading the current configuration The return value will be from the same set of values which can be passed to s3c2410_gpio_cfgpin(). + The s3c_gpio_getcfg() call should be a functional replacement for + this call. + Configuring a pull-up resistor ------------------------------ @@ -95,6 +159,10 @@ Configuring a pull-up resistor Where the to value is zero to set the pull-up off, and 1 to enable the specified pull-up. Any other values are currently undefined. 
+ The s3c_gpio_setpull() offers similar functionality, but with the + ability to encode whether the pull is up or down. Currently there + is no 'just on' state, so up or down must be selected. + Getting the state of a PIN -------------------------- @@ -106,6 +174,9 @@ Getting the state of a PIN This will return either zero or non-zero. Do not count on this function returning 1 if the pin is set. + This call is now implemented by the relevant gpiolib calls, convert + your board or driver to use gpiolib. + Setting the state of a PIN -------------------------- @@ -117,6 +188,9 @@ Setting the state of a PIN Which sets the given pin to the value. Use 0 to write 0, and 1 to set the output to 1. + This call is now implemented by the relevant gpiolib calls, convert + your board or driver to use gpiolib. + Getting the IRQ number associated with a PIN -------------------------------------------- @@ -128,6 +202,9 @@ Getting the IRQ number associated with a PIN Note, not all pins have an IRQ. + This call is now implemented by the relevant gpiolib calls, convert + your board or driver to use gpiolib. + Authour ------- diff --git a/Documentation/arm/Samsung-S3C24XX/Overview.txt b/Documentation/arm/Samsung-S3C24XX/Overview.txt index 081892df4fda..c12bfc1a00c9 100644 --- a/Documentation/arm/Samsung-S3C24XX/Overview.txt +++ b/Documentation/arm/Samsung-S3C24XX/Overview.txt @@ -8,10 +8,16 @@ Introduction The Samsung S3C24XX range of ARM9 System-on-Chip CPUs are supported by the 's3c2410' architecture of ARM Linux. Currently the S3C2410, - S3C2412, S3C2413, S3C2440, S3C2442 and S3C2443 devices are supported. + S3C2412, S3C2413, S3C2416 S3C2440, S3C2442, S3C2443 and S3C2450 devices + are supported. Support for the S3C2400 and S3C24A0 series are in progress. + The S3C2416 and S3C2450 devices are very similar and S3C2450 support is + included under the arch/arm/mach-s3c2416 directory. Note, whilst core + support for these SoCs is in, work on some of the extra peripherals + and extra interrupts is still ongoing. + Configuration ------------- @@ -209,6 +215,13 @@ GPIO Newer kernels carry GPIOLIB, and support is being moved towards this with some of the older support in line to be removed. + As of v2.6.34, the move towards using gpiolib support is almost + complete, and very little of the old calls are left. + + See Documentation/arm/Samsung-S3C24XX/GPIO.txt for the S3C24XX specific + support and Documentation/arm/Samsung/GPIO.txt for the core Samsung + implementation. + Clock Management ---------------- diff --git a/Documentation/arm/Samsung/GPIO.txt b/Documentation/arm/Samsung/GPIO.txt new file mode 100644 index 000000000000..05850c62abeb --- /dev/null +++ b/Documentation/arm/Samsung/GPIO.txt @@ -0,0 +1,42 @@ + Samsung GPIO implementation + =========================== + +Introduction +------------ + +This outlines the Samsung GPIO implementation and the architecture +specfic calls provided alongisde the drivers/gpio core. + + +S3C24XX (Legacy) +---------------- + +See Documentation/arm/Samsung-S3C24XX/GPIO.txt for more information +about these devices. Their implementation is being brought into line +with the core samsung implementation described in this document. + + +GPIOLIB integration +------------------- + +The gpio implementation uses gpiolib as much as possible, only providing +specific calls for the items that require Samsung specific handling, such +as pin special-function or pull resistor control. + +GPIO numbering is synchronised between the Samsung and gpiolib system. 
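
Below is a minimal sketch of the conversion described in the S3C24XX notes above, assuming a board that drives a peripheral-power line from S3C2410_GPA(0); the function name, label string and pull setting are illustrative and are not taken from the patch itself:

    #include <linux/gpio.h>
    #include <linux/kernel.h>
    #include <plat/gpio-cfg.h>   /* plus the machine header providing S3C2410_GPA() */

    static int example_power_init(void)
    {
        int ret;

        /* gpiolib requires the pin to be claimed before it is used. */
        ret = gpio_request(S3C2410_GPA(0), "peripheral-power");
        if (WARN_ON(ret))
            return ret;

        /*
         * Old code: s3c2410_gpio_cfgpin() + s3c2410_gpio_setpin().
         * New code: set direction and initial value in one call,
         * then use gpio_set_value() for later changes.
         */
        gpio_direction_output(S3C2410_GPA(0), 1);

        /* Pull control stays a Samsung-specific call. */
        s3c_gpio_setpull(S3C2410_GPA(0), S3C_GPIO_PULL_NONE);

        return 0;
    }

The same pattern applies to input pins, with gpio_direction_input() followed by gpio_get_value().
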
+ + +PIN configuration +----------------- + +Pin configuration is specific to the Samsung architecutre, with each SoC +registering the necessary information for the core gpio configuration +implementation to configure pins as necessary. + +The s3c_gpio_cfgpin() and s3c_gpio_setpull() provide the means for a +driver or machine to change gpio configuration. + +See arch/arm/plat-samsung/include/plat/gpio-cfg.h for more information +on these functions. + + diff --git a/Documentation/arm/Samsung/Overview.txt b/Documentation/arm/Samsung/Overview.txt index 7cced1fea9c3..c3094ea51aa7 100644 --- a/Documentation/arm/Samsung/Overview.txt +++ b/Documentation/arm/Samsung/Overview.txt @@ -13,9 +13,10 @@ Introduction - S3C24XX: See Documentation/arm/Samsung-S3C24XX/Overview.txt for full list - S3C64XX: S3C6400 and S3C6410 - - S5PC6440 - - S5PC100 and S5PC110 support is currently being merged + - S5P6440 + - S5P6442 + - S5PC100 + - S5PC110 / S5PV210 S3C24XX Systems @@ -35,7 +36,10 @@ Configuration unifying all the SoCs into one kernel. s5p6440_defconfig - S5P6440 specific default configuration + s5p6442_defconfig - S5P6442 specific default configuration s5pc100_defconfig - S5PC100 specific default configuration + s5pc110_defconfig - S5PC110 specific default configuration + s5pv210_defconfig - S5PV210 specific default configuration Layout @@ -50,18 +54,27 @@ Layout specific information. It contains the base clock, GPIO and device definitions to get the system running. - plat-s3c is the s3c24xx/s3c64xx platform directory, although it is currently - involved in other builds this will be phased out once the relevant code is - moved elsewhere. - plat-s3c24xx is for s3c24xx specific builds, see the S3C24XX docs. - plat-s3c64xx is for the s3c64xx specific bits, see the S3C24XX docs. + plat-s5p is for s5p specific builds, and contains common support for the + S5P specific systems. Not all S5Ps use all the features in this directory + due to differences in the hardware. + + +Layout changes +-------------- + + The old plat-s3c and plat-s5pc1xx directories have been removed, with + support moved to either plat-samsung or plat-s5p as necessary. These moves + where to simplify the include and dependency issues involved with having + so many different platform directories. - plat-s5p is for s5p specific builds, more to be added. + It was decided to remove plat-s5pc1xx as some of the support was already + in plat-s5p or plat-samsung, with the S5PC110 support added with S5PV210 + the only user was the S5PC100. The S5PC100 specific items where moved to + arch/arm/mach-s5pc100. - [ to finish ] Port Contributors diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt index 57444c2609fc..b34823ff1646 100644 --- a/Documentation/cgroups/cgroups.txt +++ b/Documentation/cgroups/cgroups.txt @@ -339,7 +339,7 @@ To mount a cgroup hierarchy with all available subsystems, type: The "xxx" is not interpreted by the cgroup code, but will appear in /proc/mounts so may be any useful identifying string that you like. 
-To mount a cgroup hierarchy with just the cpuset and numtasks +To mount a cgroup hierarchy with just the cpuset and memory subsystems, type: # mount -t cgroup -o cpuset,memory hier1 /dev/cgroup diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 6cab1f29da4c..7781857dc940 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -1,18 +1,15 @@ Memory Resource Controller NOTE: The Memory Resource Controller has been generically been referred -to as the memory controller in this document. Do not confuse memory controller -used here with the memory controller that is used in hardware. + to as the memory controller in this document. Do not confuse memory + controller used here with the memory controller that is used in hardware. -Salient features - -a. Enable control of Anonymous, Page Cache (mapped and unmapped) and - Swap Cache memory pages. -b. The infrastructure allows easy addition of other types of memory to control -c. Provides *zero overhead* for non memory controller users -d. Provides a double LRU: global memory pressure causes reclaim from the - global LRU; a cgroup on hitting a limit, reclaims from the per - cgroup LRU +(For editors) +In this document: + When we mention a cgroup (cgroupfs's directory) with memory controller, + we call it "memory cgroup". When you see git-log and source code, you'll + see patch's title and function names tend to use "memcg". + In this document, we avoid using it. Benefits and Purpose of the memory controller @@ -33,6 +30,45 @@ d. A CD/DVD burner could control the amount of memory used by the e. There are several other use cases, find one or use the controller just for fun (to learn and hack on the VM subsystem). +Current Status: linux-2.6.34-mmotm(development version of 2010/April) + +Features: + - accounting anonymous pages, file caches, swap caches usage and limiting them. + - private LRU and reclaim routine. (system's global LRU and private LRU + work independently from each other) + - optionally, memory+swap usage can be accounted and limited. + - hierarchical accounting + - soft limit + - moving(recharging) account at moving a task is selectable. + - usage threshold notifier + - oom-killer disable knob and oom-notifier + - Root cgroup has no limit controls. + + Kernel memory and Hugepages are not under control yet. We just manage + pages on LRU. To add more controls, we have to take care of performance. + +Brief summary of control files. + + tasks # attach a task(thread) and show list of threads + cgroup.procs # show list of processes + cgroup.event_control # an interface for event_fd() + memory.usage_in_bytes # show current memory(RSS+Cache) usage. 
+ memory.memsw.usage_in_bytes # show current memory+Swap usage + memory.limit_in_bytes # set/show limit of memory usage + memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage + memory.failcnt # show the number of memory usage hits limits + memory.memsw.failcnt # show the number of memory+Swap hits limits + memory.max_usage_in_bytes # show max memory usage recorded + memory.memsw.usage_in_bytes # show max memory+Swap usage recorded + memory.soft_limit_in_bytes # set/show soft limit of memory usage + memory.stat # show various statistics + memory.use_hierarchy # set/show hierarchical account enabled + memory.force_empty # trigger forced move charge to parent + memory.swappiness # set/show swappiness parameter of vmscan + (See sysctl's vm.swappiness) + memory.move_charge_at_immigrate # set/show controls of moving charges + memory.oom_control # set/show oom controls. + 1. History The memory controller has a long history. A request for comments for the memory @@ -106,14 +142,14 @@ the necessary data structures and check if the cgroup that is being charged is over its limit. If it is then reclaim is invoked on the cgroup. More details can be found in the reclaim section of this document. If everything goes well, a page meta-data-structure called page_cgroup is -allocated and associated with the page. This routine also adds the page to -the per cgroup LRU. +updated. page_cgroup has its own LRU on cgroup. +(*) page_cgroup structure is allocated at boot/memory-hotplug time. 2.2.1 Accounting details All mapped anon pages (RSS) and cache pages (Page Cache) are accounted. -(some pages which never be reclaimable and will not be on global LRU - are not accounted. we just accounts pages under usual vm management.) +Some pages which are never reclaimable and will not be on the global LRU +are not accounted. We just account pages under usual VM management. RSS pages are accounted at page_fault unless they've already been accounted for earlier. A file page will be accounted for as Page Cache when it's @@ -121,12 +157,19 @@ inserted into inode (radix-tree). While it's mapped into the page tables of processes, duplicate accounting is carefully avoided. A RSS page is unaccounted when it's fully unmapped. A PageCache page is -unaccounted when it's removed from radix-tree. +unaccounted when it's removed from radix-tree. Even if RSS pages are fully +unmapped (by kswapd), they may exist as SwapCache in the system until they +are really freed. Such SwapCaches also also accounted. +A swapped-in page is not accounted until it's mapped. + +Note: The kernel does swapin-readahead and read multiple swaps at once. +This means swapped-in pages may contain pages for other tasks than a task +causing page fault. So, we avoid accounting at swap-in I/O. At page migration, accounting information is kept. -Note: we just account pages-on-lru because our purpose is to control amount -of used pages. not-on-lru pages are tend to be out-of-control from vm view. +Note: we just account pages-on-LRU because our purpose is to control amount +of used pages; not-on-LRU pages tend to be out-of-control from VM view. 2.3 Shared Page Accounting @@ -143,6 +186,7 @@ caller of swapoff rather than the users of shmem. 2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP) + Swap Extension allows you to record charge for swap. A swapped-in page is charged back to original page allocator if possible. @@ -150,13 +194,20 @@ When swap is accounted, following files are added. - memory.memsw.usage_in_bytes. - memory.memsw.limit_in_bytes. 
-usage of mem+swap is limited by memsw.limit_in_bytes. +memsw means memory+swap. Usage of memory+swap is limited by +memsw.limit_in_bytes. -* why 'mem+swap' rather than swap. +Example: Assume a system with 4G of swap. A task which allocates 6G of memory +(by mistake) under 2G memory limitation will use all swap. +In this case, setting memsw.limit_in_bytes=3G will prevent bad use of swap. +By using memsw limit, you can avoid system OOM which can be caused by swap +shortage. + +* why 'memory+swap' rather than swap. The global LRU(kswapd) can swap out arbitrary pages. Swap-out means to move account from memory to swap...there is no change in usage of -mem+swap. In other words, when we want to limit the usage of swap without -affecting global LRU, mem+swap limit is better than just limiting swap from +memory+swap. In other words, when we want to limit the usage of swap without +affecting global LRU, memory+swap limit is better than just limiting swap from OS point of view. * What happens when a cgroup hits memory.memsw.limit_in_bytes @@ -168,12 +219,12 @@ it by cgroup. 2.5 Reclaim -Each cgroup maintains a per cgroup LRU that consists of an active -and inactive list. When a cgroup goes over its limit, we first try +Each cgroup maintains a per cgroup LRU which has the same structure as +global VM. When a cgroup goes over its limit, we first try to reclaim memory from the cgroup so as to make space for the new pages that the cgroup has touched. If the reclaim is unsuccessful, an OOM routine is invoked to select and kill the bulkiest task in the -cgroup. +cgroup. (See 10. OOM Control below.) The reclaim algorithm has not been modified for cgroups, except that pages that are selected for reclaiming come from the per cgroup LRU @@ -184,13 +235,22 @@ limits on the root cgroup. Note2: When panic_on_oom is set to "2", the whole system will panic. -2. Locking +When oom event notifier is registered, event will be delivered. +(See oom_control section) + +2.6 Locking -The memory controller uses the following hierarchy + lock_page_cgroup()/unlock_page_cgroup() should not be called under + mapping->tree_lock. -1. zone->lru_lock is used for selecting pages to be isolated -2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone) -3. lock_page_cgroup() is used to protect page->page_cgroup + Other lock order is following: + PG_locked. + mm->page_table_lock + zone->lru_lock + lock_page_cgroup. + In many cases, just lock_page_cgroup() is called. + per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by + zone->lru_lock, it has no lock of its own. 3. User Interface @@ -199,6 +259,7 @@ The memory controller uses the following hierarchy a. Enable CONFIG_CGROUPS b. Enable CONFIG_RESOURCE_COUNTERS c. Enable CONFIG_CGROUP_MEM_RES_CTLR +d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension) 1. Prepare the cgroups # mkdir -p /cgroups @@ -206,31 +267,28 @@ c. Enable CONFIG_CGROUP_MEM_RES_CTLR 2. Make the new group and move bash into it # mkdir /cgroups/0 -# echo $$ > /cgroups/0/tasks +# echo $$ > /cgroups/0/tasks -Since now we're in the 0 cgroup, -We can alter the memory limit: +Since now we're in the 0 cgroup, we can alter the memory limit: # echo 4M > /cgroups/0/memory.limit_in_bytes NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo, -mega or gigabytes. +mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.) + NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited). NOTE: We cannot set limits on the root cgroup any more. 
# cat /cgroups/0/memory.limit_in_bytes 4194304 -NOTE: The interface has now changed to display the usage in bytes -instead of pages - We can check the usage: # cat /cgroups/0/memory.usage_in_bytes 1216512 A successful write to this file does not guarantee a successful set of -this limit to the value written into the file. This can be due to a +this limit to the value written into the file. This can be due to a number of factors, such as rounding up to page boundaries or the total -availability of memory on the system. The user is required to re-read +availability of memory on the system. The user is required to re-read this file after a write to guarantee the value committed by the kernel. # echo 1 > memory.limit_in_bytes @@ -245,15 +303,23 @@ caches, RSS and Active pages/Inactive pages are shown. 4. Testing -Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11]. -Apart from that v6 has been tested with several applications and regular -daily use. The controller has also been tested on the PPC64, x86_64 and -UML platforms. +For testing features and implementation, see memcg_test.txt. + +Performance test is also important. To see pure memory controller's overhead, +testing on tmpfs will give you good numbers of small overheads. +Example: do kernel make on tmpfs. + +Page-fault scalability is also important. At measuring parallel +page fault test, multi-process test may be better than multi-thread +test because it has noise of shared objects/status. + +But the above two are testing extreme situations. +Trying usual test under memory controller is always helpful. 4.1 Troubleshooting Sometimes a user might find that the application under a cgroup is -terminated. There are several causes for this: +terminated by OOM killer. There are several causes for this: 1. The cgroup limit is too low (just too low to do anything useful) 2. The user is using anonymous memory and swap is turned off or too low @@ -261,6 +327,9 @@ terminated. There are several causes for this: A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of some of the pages cached in the cgroup (page cache pages). +To know what happens, disable OOM_Kill by 10. OOM Control(see below) and +seeing what happens will be helpful. + 4.2 Task migration When a task migrates from one cgroup to another, its charge is not @@ -268,16 +337,19 @@ carried forward by default. The pages allocated from the original cgroup still remain charged to it, the charge is dropped when the page is freed or reclaimed. -Note: You can move charges of a task along with task migration. See 8. +You can move charges of a task along with task migration. +See 8. "Move charges at task migration" 4.3 Removing a cgroup A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a cgroup might have some charge associated with it, even though all -tasks have migrated away from it. -Such charges are freed(at default) or moved to its parent. When moved, -both of RSS and CACHES are moved to parent. -If both of them are busy, rmdir() returns -EBUSY. See 5.1 Also. +tasks have migrated away from it. (because we charge against pages, not +against tasks.) + +Such charges are freed or moved to their parent. At moving, both of RSS +and CACHES are moved to parent. +rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also. Charges recorded in swap information is not updated at removal of cgroup. Recorded information is discarded and a cgroup which uses swap (swapcache) @@ -293,10 +365,10 @@ will be charged as a new owner of it. 
# echo 0 > memory.force_empty - Almost all pages tracked by this memcg will be unmapped and freed. Some of - pages cannot be freed because it's locked or in-use. Such pages are moved - to parent and this cgroup will be empty. But this may return -EBUSY in - some too busy case. + Almost all pages tracked by this memory cgroup will be unmapped and freed. + Some pages cannot be freed because they are locked or in-use. Such pages are + moved to parent and this cgroup will be empty. This may return -EBUSY if + VM is too busy to free/move all pages immediately. Typical use case of this interface is that calling this before rmdir(). Because rmdir() moves all pages to parent, some out-of-use page caches can be @@ -306,19 +378,41 @@ will be charged as a new owner of it. memory.stat file includes following statistics +# per-memory cgroup local status cache - # of bytes of page cache memory. rss - # of bytes of anonymous and swap cache memory. +mapped_file - # of bytes of mapped file (includes tmpfs/shmem) pgpgin - # of pages paged in (equivalent to # of charging events). pgpgout - # of pages paged out (equivalent to # of uncharging events). -active_anon - # of bytes of anonymous and swap cache memory on active - lru list. +swap - # of bytes of swap usage inactive_anon - # of bytes of anonymous memory and swap cache memory on - inactive lru list. -active_file - # of bytes of file-backed memory on active lru list. -inactive_file - # of bytes of file-backed memory on inactive lru list. + LRU list. +active_anon - # of bytes of anonymous and swap cache memory on active + inactive LRU list. +inactive_file - # of bytes of file-backed memory on inactive LRU list. +active_file - # of bytes of file-backed memory on active LRU list. unevictable - # of bytes of memory that cannot be reclaimed (mlocked etc). -The following additional stats are dependent on CONFIG_DEBUG_VM. +# status considering hierarchy (see memory.use_hierarchy settings) + +hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy + under which the memory cgroup is +hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to + hierarchy under which memory cgroup is. + +total_cache - sum of all children's "cache" +total_rss - sum of all children's "rss" +total_mapped_file - sum of all children's "cache" +total_pgpgin - sum of all children's "pgpgin" +total_pgpgout - sum of all children's "pgpgout" +total_swap - sum of all children's "swap" +total_inactive_anon - sum of all children's "inactive_anon" +total_active_anon - sum of all children's "active_anon" +total_inactive_file - sum of all children's "inactive_file" +total_active_file - sum of all children's "active_file" +total_unevictable - sum of all children's "unevictable" + +# The following additional stats are dependent on CONFIG_DEBUG_VM. inactive_ratio - VM internal parameter. (see mm/page_alloc.c) recent_rotated_anon - VM internal parameter. (see mm/vmscan.c) @@ -327,24 +421,37 @@ recent_scanned_anon - VM internal parameter. (see mm/vmscan.c) recent_scanned_file - VM internal parameter. (see mm/vmscan.c) Memo: - recent_rotated means recent frequency of lru rotation. - recent_scanned means recent # of scans to lru. + recent_rotated means recent frequency of LRU rotation. + recent_scanned means recent # of scans to LRU. showing for better debug please see the code for meanings. Note: Only anonymous and swap cache memory is listed as part of 'rss' stat. 
This should not be confused with the true 'resident set size' or the - amount of physical memory used by the cgroup. Per-cgroup rss - accounting is not done yet. + amount of physical memory used by the cgroup. + 'rss + file_mapped" will give you resident set size of cgroup. + (Note: file and shmem may be shared among other cgroups. In that case, + file_mapped is accounted only when the memory cgroup is owner of page + cache.) 5.3 swappiness - Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. - Following cgroups' swappiness can't be changed. - - root cgroup (uses /proc/sys/vm/swappiness). - - a cgroup which uses hierarchy and it has child cgroup. - - a cgroup which uses hierarchy and not the root of hierarchy. +Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. +Following cgroups' swappiness can't be changed. +- root cgroup (uses /proc/sys/vm/swappiness). +- a cgroup which uses hierarchy and it has other cgroup(s) below it. +- a cgroup which uses hierarchy and not the root of hierarchy. + +5.4 failcnt + +A memory cgroup provides memory.failcnt and memory.memsw.failcnt files. +This failcnt(== failure count) shows the number of times that a usage counter +hit its limit. When a memory cgroup hits a limit, failcnt increases and +memory under it will be reclaimed. + +You can reset failcnt by writing 0 to failcnt file. +# echo 0 > .../memory.failcnt 6. Hierarchy support @@ -363,13 +470,13 @@ hierarchy In the diagram above, with hierarchical accounting enabled, all memory usage of e, is accounted to its ancestors up until the root (i.e, c and root), -that has memory.use_hierarchy enabled. If one of the ancestors goes over its +that has memory.use_hierarchy enabled. If one of the ancestors goes over its limit, the reclaim algorithm reclaims from the tasks in the ancestor and the children of the ancestor. 6.1 Enabling hierarchical accounting and reclaim -The memory controller by default disables the hierarchy feature. Support +A memory cgroup by default disables the hierarchy feature. Support can be enabled by writing 1 to memory.use_hierarchy file of the root cgroup # echo 1 > memory.use_hierarchy @@ -379,10 +486,10 @@ The feature can be disabled by # echo 0 > memory.use_hierarchy NOTE1: Enabling/disabling will fail if the cgroup already has other -cgroups created below it. + cgroups created below it. NOTE2: When panic_on_oom is set to "2", the whole system will panic in -case of an oom event in any cgroup. + case of an OOM event in any cgroup. 7. Soft limits @@ -392,7 +499,7 @@ is to allow control groups to use as much of the memory as needed, provided a. There is no memory contention b. They do not exceed their hard limit -When the system detects memory contention or low memory control groups +When the system detects memory contention or low memory, control groups are pushed back to their soft limits. If the soft limit of each control group is very high, they are pushed back as much as possible to make sure that one control group does not starve the others of memory. @@ -406,7 +513,7 @@ it gets invoked from balance_pgdat (kswapd). 
7.1 Interface Soft limits can be setup by using the following commands (in this example we -assume a soft limit of 256 megabytes) +assume a soft limit of 256 MiB) # echo 256M > memory.soft_limit_in_bytes @@ -442,7 +549,7 @@ Note: Charges are moved only when you move mm->owner, IOW, a leader of a thread Note: If we cannot find enough space for the task in the destination cgroup, we try to make space by reclaiming memory. Task migration may fail if we cannot make enough space. -Note: It can take several seconds if you move charges in giga bytes order. +Note: It can take several seconds if you move charges much. And if you want disable it again: @@ -451,21 +558,27 @@ And if you want disable it again: 8.2 Type of charges which can be move Each bits of move_charge_at_immigrate has its own meaning about what type of -charges should be moved. +charges should be moved. But in any cases, it must be noted that an account of +a page or a swap can be moved only when it is charged to the task's current(old) +memory cgroup. bit | what type of charges would be moved ? -----+------------------------------------------------------------------------ 0 | A charge of an anonymous page(or swap of it) used by the target task. | Those pages and swaps must be used only by the target task. You must | enable Swap Extension(see 2.4) to enable move of swap charges. - -Note: Those pages and swaps must be charged to the old cgroup. -Note: More type of pages(e.g. file cache, shmem,) will be supported by other - bits in future. + -----+------------------------------------------------------------------------ + 1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory) + | and swaps of tmpfs file) mmapped by the target task. Unlike the case of + | anonymous pages, file pages(and swaps) in the range mmapped by the task + | will be moved even if the task hasn't done page fault, i.e. they might + | not be the task's "RSS", but other task's "RSS" that maps the same file. + | And mapcount of the page is ignored(the page can be moved even if + | page_mapcount(page) > 1). You must enable Swap Extension(see 2.4) to + | enable move of swap charges. 8.3 TODO -- Add support for other types of pages(e.g. file cache, shmem, etc.). - Implement madvise(2) to let users decide the vma to be moved or not to be moved. - All of moving charge operations are done under cgroup_mutex. It's not good @@ -473,22 +586,61 @@ Note: More type of pages(e.g. file cache, shmem,) will be supported by other 9. Memory thresholds -Memory controler implements memory thresholds using cgroups notification +Memory cgroup implements memory thresholds using cgroups notification API (see cgroups.txt). It allows to register multiple memory and memsw thresholds and gets notifications when it crosses. To register a threshold application need: - - create an eventfd using eventfd(2); - - open memory.usage_in_bytes or memory.memsw.usage_in_bytes; - - write string like " " to - cgroup.event_control. +- create an eventfd using eventfd(2); +- open memory.usage_in_bytes or memory.memsw.usage_in_bytes; +- write string like " " to + cgroup.event_control. Application will be notified through eventfd when memory usage crosses threshold in any direction. It's applicable for root and non-root cgroup. -10. TODO +10. OOM Control + +memory.oom_control file is for OOM notification and other controls. + +Memory cgroup implements OOM notifier using cgroup notification +API (See cgroups.txt). 
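
As a minimal user-space sketch of the three registration steps listed in section 9 above (the cgroup mount point, cgroup name and the 4M threshold are assumptions; check cgroups.txt for the exact control-string format):

    #include <sys/eventfd.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char ctl[64];
        uint64_t hits;
        int efd = eventfd(0, 0);
        int usage = open("/cgroups/0/memory.usage_in_bytes", O_RDONLY);
        int evctl = open("/cgroups/0/cgroup.event_control", O_WRONLY);

        if (efd < 0 || usage < 0 || evctl < 0)
            return 1;

        /* control string: event_fd, fd of memory.usage_in_bytes, threshold in bytes */
        snprintf(ctl, sizeof(ctl), "%d %d %d", efd, usage, 4 * 1024 * 1024);
        if (write(evctl, ctl, strlen(ctl)) < 0)
            return 1;

        /* Blocks until usage crosses the 4M threshold in either direction. */
        if (read(efd, &hits, sizeof(hits)) == sizeof(hits))
            printf("memory.usage_in_bytes crossed the threshold\n");
        return 0;
    }

The OOM notifier described in this section is registered the same way, except that memory.oom_control is opened in place of memory.usage_in_bytes and no threshold field is written.
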
It allows to register multiple OOM notification +delivery and gets notification when OOM happens. + +To register a notifier, application need: + - create an eventfd using eventfd(2) + - open memory.oom_control file + - write string like " " to + cgroup.event_control + +Application will be notified through eventfd when OOM happens. +OOM notification doesn't work for root cgroup. + +You can disable OOM-killer by writing "1" to memory.oom_control file, as: + + #echo 1 > memory.oom_control + +This operation is only allowed to the top cgroup of sub-hierarchy. +If OOM-killer is disabled, tasks under cgroup will hang/sleep +in memory cgroup's OOM-waitqueue when they request accountable memory. + +For running them, you have to relax the memory cgroup's OOM status by + * enlarge limit or reduce usage. +To reduce usage, + * kill some tasks. + * move some tasks to other group with account migration. + * remove some files (on tmpfs?) + +Then, stopped tasks will work again. + +At reading, current status of OOM is shown. + oom_kill_disable 0 or 1 (if 1, oom-killer is disabled) + under_oom 0 or 1 (if 1, the memory cgroup is under OOM, tasks may + be stopped.) + +11. TODO 1. Add support for accounting huge pages (as a separate controller) 2. Make per-cgroup scanner reclaim not-shared pages first diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index a86152ae2f6f..672be0109d02 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -646,3 +646,13 @@ Who: Thomas Gleixner ---------------------------- +What: old ieee1394 subsystem (CONFIG_IEEE1394) +When: 2.6.37 +Files: drivers/ieee1394/ except init_ohci1394_dma.c +Why: superseded by drivers/firewire/ (CONFIG_FIREWIRE) which offers more + features, better performance, and better security, all with smaller + and more modern code base +Who: Stefan Richter + +---------------------------- + diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index af1608070cd5..96d4293607ec 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -380,7 +380,7 @@ prototypes: int (*open) (struct inode *, struct file *); int (*flush) (struct file *); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, struct dentry *, int datasync); + int (*fsync) (struct file *, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); @@ -429,8 +429,9 @@ check_flags: no implementations. If your fs is not using generic_file_llseek, you need to acquire and release the appropriate locks in your ->llseek(). For many filesystems, it is probably safe to acquire the inode -mutex. Note some filesystems (i.e. remote ones) provide no -protection for i_size so you will need to use the BKL. +mutex or just to use i_size_read() instead. +Note: this does not protect the file->f_pos against concurrent modifications +since this is something the userspace has to take care about. Note: ext2_release() was *the* source of contention on fs-intensive loads and dropping BKL on ->release() helps to get rid of that (we still diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index b66858538df5..94677e7dcb13 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -401,11 +401,16 @@ otherwise noted. started might not be in the page cache at the end of the walk). 
- truncate: called by the VFS to change the size of a file. The + truncate: Deprecated. This will not be called if ->setsize is defined. + Called by the VFS to change the size of a file. The i_size field of the inode is set to the desired size by the VFS before this method is called. This method is called by the truncate(2) system call and related functionality. + Note: ->truncate and vmtruncate are deprecated. Do not add new + instances/calls of these. Filesystems should be converted to do their + truncate sequence via ->setattr(). + permission: called by the VFS to check for access rights on a POSIX-like filesystem. @@ -729,7 +734,7 @@ struct file_operations { int (*open) (struct inode *, struct file *); int (*flush) (struct file *); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, struct dentry *, int datasync); + int (*fsync) (struct file *, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737 index 001d2e70bc11..fc5df7654d63 100644 --- a/Documentation/hwmon/dme1737 +++ b/Documentation/hwmon/dme1737 @@ -9,11 +9,15 @@ Supported chips: * SMSC SCH3112, SCH3114, SCH3116 Prefix: 'sch311x' Addresses scanned: none, address read from Super-I/O config space - Datasheet: http://www.nuhorizons.com/FeaturedProducts/Volume1/SMSC/311x.pdf + Datasheet: Available on the Internet * SMSC SCH5027 Prefix: 'sch5027' Addresses scanned: I2C 0x2c, 0x2d, 0x2e Datasheet: Provided by SMSC upon request and under NDA + * SMSC SCH5127 + Prefix: 'sch5127' + Addresses scanned: none, address read from Super-I/O config space + Datasheet: Provided by SMSC upon request and under NDA Authors: Juerg Haefliger @@ -36,8 +40,8 @@ Description ----------- This driver implements support for the hardware monitoring capabilities of the -SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, and SMSC -SCH311x Super-I/O chips. These chips feature monitoring of 3 temp sensors +SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x, +and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and 1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and @@ -48,14 +52,14 @@ Fan[3-6] and pwm[3,5-6] are optional features and their availability depends on the configuration of the chip. The driver will detect which features are present during initialization and create the sysfs attributes accordingly. -For the SCH311x, fan[1-3] and pwm[1-3] are always present and fan[4-6] and -pwm[5-6] don't exist. +For the SCH311x and SCH5127, fan[1-3] and pwm[1-3] are always present and +fan[4-6] and pwm[5-6] don't exist. The hardware monitoring features of the DME1737, A8000, and SCH5027 are only -accessible via SMBus, while the SCH311x only provides access via the ISA bus. -The driver will therefore register itself as an I2C client driver if it detects -a DME1737, A8000, or SCH5027 and as a platform driver if it detects a SCH311x -chip. +accessible via SMBus, while the SCH311x and SCH5127 only provide access via +the ISA bus. The driver will therefore register itself as an I2C client driver +if it detects a DME1737, A8000, or SCH5027 and as a platform driver if it +detects a SCH311x or SCH5127 chip. 
Voltage Monitoring @@ -76,7 +80,7 @@ DME1737, A8000: in6: Vbat (+3.0V) 0V - 4.38V SCH311x: - in0: +2.5V 0V - 6.64V + in0: +2.5V 0V - 3.32V in1: Vccp (processor core) 0V - 2V in2: VCC (internal +3.3V) 0V - 4.38V in3: +5V 0V - 6.64V @@ -93,6 +97,15 @@ SCH5027: in5: VTR (+3.3V standby) 0V - 4.38V in6: Vbat (+3.0V) 0V - 4.38V +SCH5127: + in0: +2.5 0V - 3.32V + in1: Vccp (processor core) 0V - 3V + in2: VCC (internal +3.3V) 0V - 4.38V + in3: V2_IN 0V - 1.5V + in4: V1_IN 0V - 1.5V + in5: VTR (+3.3V standby) 0V - 4.38V + in6: Vbat (+3.0V) 0V - 4.38V + Each voltage input has associated min and max limits which trigger an alarm when crossed. @@ -293,3 +306,21 @@ pwm[1-3]_auto_point1_pwm RW Auto PWM pwm point. Auto_point1 is the pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the full-speed duty-cycle which is hard- wired to 255 (100% duty-cycle). + +Chip Differences +---------------- + +Feature dme1737 sch311x sch5027 sch5127 +------------------------------------------------------- +temp[1-3]_offset yes yes +vid yes +zone3 yes yes yes +zone[1-3]_hyst yes yes +pwm min/off yes yes +fan3 opt yes opt yes +pwm3 opt yes opt yes +fan4 opt opt +fan5 opt opt +pwm5 opt opt +fan6 opt opt +pwm6 opt opt diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63 index 31660bf97979..b9843eab1afb 100644 --- a/Documentation/hwmon/lm63 +++ b/Documentation/hwmon/lm63 @@ -7,6 +7,11 @@ Supported chips: Addresses scanned: I2C 0x4c Datasheet: Publicly available at the National Semiconductor website http://www.national.com/pf/LM/LM63.html + * National Semiconductor LM64 + Prefix: 'lm64' + Addresses scanned: I2C 0x18 and 0x4e + Datasheet: Publicly available at the National Semiconductor website + http://www.national.com/pf/LM/LM64.html Author: Jean Delvare @@ -55,3 +60,5 @@ The lm63 driver will not update its values more frequently than every second; reading them more often will do no harm, but will return 'old' values. +The LM64 is effectively an LM63 with GPIO lines. The driver does not +support these GPIO lines at present. diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245 index 02838a47d862..86b5880d8502 100644 --- a/Documentation/hwmon/ltc4245 +++ b/Documentation/hwmon/ltc4245 @@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm in7_min_alarm 3v output undervoltage alarm in8_min_alarm Vee (-12v) output undervoltage alarm -in9_input GPIO #1 voltage data -in10_input GPIO #2 voltage data -in11_input GPIO #3 voltage data +in9_input GPIO voltage data power1_input 12v power usage (mW) power2_input 5v power usage (mW) diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface index 3de6b0bcb147..d4e2917c6f18 100644 --- a/Documentation/hwmon/sysfs-interface +++ b/Documentation/hwmon/sysfs-interface @@ -80,9 +80,9 @@ All entries (except name) are optional, and should only be created in a given driver if the chip has the feature. -******** -* Name * -******** +********************* +* Global attributes * +********************* name The chip name. This should be a short, lowercase string, not containing @@ -91,6 +91,13 @@ name The chip name. I2C devices get this attribute created automatically. RO +update_rate The rate at which the chip will update readings. + Unit: millisecond + RW + Some devices have a variable update rate. This attribute + can be used to change the update rate to the desired + frequency. 
+ ************ * Voltages * diff --git a/Documentation/hwmon/tmp102 b/Documentation/hwmon/tmp102 new file mode 100644 index 000000000000..8454a7763122 --- /dev/null +++ b/Documentation/hwmon/tmp102 @@ -0,0 +1,26 @@ +Kernel driver tmp102 +==================== + +Supported chips: + * Texas Instruments TMP102 + Prefix: 'tmp102' + Addresses scanned: none + Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp102.html + +Author: + Steven King + +Description +----------- + +The Texas Instruments TMP102 implements one temperature sensor. Limits can be +set through the Overtemperature Shutdown register and Hysteresis register. The +sensor is accurate to 0.5 degree over the range of -25 to +85 C, and to 1.0 +degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The +operating temperature has a minimum of -55 C and a maximum of +150 C. + +The TMP102 has a programmable update rate that can select between 8, 4, 1, and +0.5 Hz. (Currently the driver only supports the default of 4 Hz). + +The driver provides the common sysfs-interface for temperatures (see +Documentation/hwmon/sysfs-interface under Temperatures). diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index 6f8c1cabbc5d..634c625da8ce 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -65,7 +65,7 @@ CROSS_COMPILE Specify an optional fixed part of the binutils filename. CROSS_COMPILE can be a part of the filename or the full path. -CROSS_COMPILE is also used for ccache is some setups. +CROSS_COMPILE is also used for ccache in some setups. CF -------------------------------------------------- @@ -162,3 +162,7 @@ For tags/TAGS/cscope targets, you can specify more than one arch to be included in the databases, separated by blank space. E.g.: $ make ALLSOURCE_ARCHS="x86 mips arm" tags + +To get all available archs you can also specify all. E.g.: + + $ make ALLSOURCE_ARCHS=all tags diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index b56ea860da21..1808f1157f30 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -145,11 +145,10 @@ and is between 256 and 4096 characters. It is defined in the file acpi= [HW,ACPI,X86] Advanced Configuration and Power Interface - Format: { force | off | ht | strict | noirq | rsdt } + Format: { force | off | strict | noirq | rsdt } force -- enable ACPI if default was off off -- disable ACPI if default was on noirq -- do not use ACPI for IRQ routing - ht -- run only enough ACPI to enable Hyper Threading strict -- Be less tolerant of platforms that are not strictly ACPI specification compliant. rsdt -- prefer RSDT over (default) XSDT @@ -758,6 +757,10 @@ and is between 256 and 4096 characters. It is defined in the file Default value is 0. Value can be changed at runtime via /selinux/enforce. + erst_disable [ACPI] + Disable Error Record Serialization Table (ERST) + support. + ether= [HW,NET] Ethernet cards parameters This option is obsoleted by the "netdev=" option, which has equivalent usage. See its documentation for details. @@ -852,6 +855,11 @@ and is between 256 and 4096 characters. It is defined in the file hd= [EIDE] (E)IDE hard drive subsystem geometry Format: ,, + hest_disable [ACPI] + Disable Hardware Error Source Table (HEST) support; + corresponding firmware-first mode error processing + logic will be disabled. + highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact size of . This works even on boxes that have no highmem otherwise. 
This also works to reduce highmem @@ -1252,6 +1260,8 @@ and is between 256 and 4096 characters. It is defined in the file * nohrst, nosrst, norst: suppress hard, soft and both resets. + * dump_id: dump IDENTIFY data. + If there are multiple matching configurations changing the same attribute, the last one is used. diff --git a/Documentation/mutex-design.txt b/Documentation/mutex-design.txt index aa60d1f627e5..c91ccc0720fa 100644 --- a/Documentation/mutex-design.txt +++ b/Documentation/mutex-design.txt @@ -66,14 +66,14 @@ of advantages of mutexes: c0377ccb : c0377ccb: f0 ff 08 lock decl (%eax) - c0377cce: 78 0e js c0377cde <.text.lock.mutex> + c0377cce: 78 0e js c0377cde <.text..lock.mutex> c0377cd0: c3 ret the unlocking fastpath is equally tight: c0377cd1 : c0377cd1: f0 ff 00 lock incl (%eax) - c0377cd4: 7e 0f jle c0377ce5 <.text.lock.mutex+0x7> + c0377cd4: 7e 0f jle c0377ce5 <.text..lock.mutex+0x7> c0377cd6: c3 ret - 'struct mutex' semantics are well-defined and are enforced if diff --git a/Documentation/timers/Makefile b/Documentation/timers/Makefile index c85625f4ab25..73f75f8a87dc 100644 --- a/Documentation/timers/Makefile +++ b/Documentation/timers/Makefile @@ -2,7 +2,7 @@ obj- := dummy.o # List of programs to build -hostprogs-y := hpet_example +hostprogs-$(CONFIG_X86) := hpet_example # Tell kbuild to always build the programs always := $(hostprogs-y) diff --git a/Documentation/vm/numa b/Documentation/vm/numa index e93ad9425e2a..a200a386429d 100644 --- a/Documentation/vm/numa +++ b/Documentation/vm/numa @@ -1,41 +1,149 @@ Started Nov 1999 by Kanoj Sarcar -The intent of this file is to have an uptodate, running commentary -from different people about NUMA specific code in the Linux vm. - -What is NUMA? It is an architecture where the memory access times -for different regions of memory from a given processor varies -according to the "distance" of the memory region from the processor. -Each region of memory to which access times are the same from any -cpu, is called a node. On such architectures, it is beneficial if -the kernel tries to minimize inter node communications. Schemes -for this range from kernel text and read-only data replication -across nodes, and trying to house all the data structures that -key components of the kernel need on memory on that node. - -Currently, all the numa support is to provide efficient handling -of widely discontiguous physical memory, so architectures which -are not NUMA but can have huge holes in the physical address space -can use the same code. All this code is bracketed by CONFIG_DISCONTIGMEM. - -The initial port includes NUMAizing the bootmem allocator code by -encapsulating all the pieces of information into a bootmem_data_t -structure. Node specific calls have been added to the allocator. -In theory, any platform which uses the bootmem allocator should -be able to put the bootmem and mem_map data structures anywhere -it deems best. - -Each node's page allocation data structures have also been encapsulated -into a pg_data_t. The bootmem_data_t is just one part of this. To -make the code look uniform between NUMA and regular UMA platforms, -UMA platforms have a statically allocated pg_data_t too (contig_page_data). -For the sake of uniformity, the function num_online_nodes() is also defined -for all platforms. As we run benchmarks, we might decide to NUMAize -more variables like low_on_memory, nr_free_pages etc into the pg_data_t. 
- -The NUMA aware page allocation code currently tries to allocate pages -from different nodes in a round robin manner. This will be changed to -do concentratic circle search, starting from current node, once the -NUMA port achieves more maturity. The call alloc_pages_node has been -added, so that drivers can make the call and not worry about whether -it is running on a NUMA or UMA platform. +What is NUMA? + +This question can be answered from a couple of perspectives: the +hardware view and the Linux software view. + +From the hardware perspective, a NUMA system is a computer platform that +comprises multiple components or assemblies each of which may contain 0 +or more CPUs, local memory, and/or IO buses. For brevity and to +disambiguate the hardware view of these physical components/assemblies +from the software abstraction thereof, we'll call the components/assemblies +'cells' in this document. + +Each of the 'cells' may be viewed as an SMP [symmetric multi-processor] subset +of the system--although some components necessary for a stand-alone SMP system +may not be populated on any given cell. The cells of the NUMA system are +connected together with some sort of system interconnect--e.g., a crossbar or +point-to-point link are common types of NUMA system interconnects. Both of +these types of interconnects can be aggregated to create NUMA platforms with +cells at multiple distances from other cells. + +For Linux, the NUMA platforms of interest are primarily what is known as Cache +Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible +to and accessible from any CPU attached to any cell and cache coherency +is handled in hardware by the processor caches and/or the system interconnect. + +Memory access time and effective memory bandwidth varies depending on how far +away the cell containing the CPU or IO bus making the memory access is from the +cell containing the target memory. For example, access to memory by CPUs +attached to the same cell will experience faster access times and higher +bandwidths than accesses to memory on other, remote cells. NUMA platforms +can have cells at multiple remote distances from any given cell. + +Platform vendors don't build NUMA systems just to make software developers' +lives interesting. Rather, this architecture is a means to provide scalable +memory bandwidth. However, to achieve scalable memory bandwidth, system and +application software must arrange for a large majority of the memory references +[cache misses] to be to "local" memory--memory on the same cell, if any--or +to the closest cell with memory. + +This leads to the Linux software view of a NUMA system: + +Linux divides the system's hardware resources into multiple software +abstractions called "nodes". Linux maps the nodes onto the physical cells +of the hardware platform, abstracting away some of the details for some +architectures. As with physical cells, software nodes may contain 0 or more +CPUs, memory and/or IO buses. And, again, memory accesses to memory on +"closer" nodes--nodes that map to closer cells--will generally experience +faster access times and higher effective bandwidth than accesses to more +remote cells. + +For some architectures, such as x86, Linux will "hide" any node representing a +physical cell that has no memory attached, and reassign any CPUs attached to +that cell to a node representing a cell that does have memory. 
Thus, on +these architectures, one cannot assume that all CPUs that Linux associates with +a given node will see the same local memory access times and bandwidth. + +In addition, for some architectures, again x86 is an example, Linux supports +the emulation of additional nodes. For NUMA emulation, Linux will carve up +the existing nodes--or the system memory for non-NUMA platforms--into multiple +nodes. Each emulated node will manage a fraction of the underlying cells' +physical memory. NUMA emulation is useful for testing NUMA kernel and +application features on non-NUMA platforms, and as a sort of memory resource +management mechanism when used together with cpusets. +[see Documentation/cgroups/cpusets.txt] + +For each node with memory, Linux constructs an independent memory management +subsystem, complete with its own free page lists, in-use page lists, usage +statistics and locks to mediate access. In addition, Linux constructs for +each memory zone [one or more of DMA, DMA32, NORMAL, HIGH_MEMORY, MOVABLE], +an ordered "zonelist". A zonelist specifies the zones/nodes to visit when a +selected zone/node cannot satisfy the allocation request. This situation, +when a zone has no available memory to satisfy a request, is called +"overflow" or "fallback". + +Because some nodes contain multiple zones containing different types of +memory, Linux must decide whether to order the zonelists such that allocations +fall back to the same zone type on a different node, or to a different zone +type on the same node. This is an important consideration because some zones, +such as DMA or DMA32, represent relatively scarce resources. Linux chooses +a default zonelist order based on the sizes of the various zone types relative +to the total memory of the node and the total memory of the system. The +default zonelist order may be overridden using the numa_zonelist_order kernel +boot parameter or sysctl. [see Documentation/kernel-parameters.txt and +Documentation/sysctl/vm.txt] + +By default, Linux will attempt to satisfy memory allocation requests from the +node to which the CPU that executes the request is assigned. Specifically, +Linux will attempt to allocate from the first node in the appropriate zonelist +for the node where the request originates. This is called "local allocation." +If the "local" node cannot satisfy the request, the kernel will examine other +nodes' zones in the selected zonelist looking for the first zone in the list +that can satisfy the request. + +Local allocation will tend to keep subsequent access to the allocated memory +"local" to the underlying physical resources and off the system interconnect-- +as long as the task on whose behalf the kernel allocated some memory does not +later migrate away from that memory. The Linux scheduler is aware of the +NUMA topology of the platform--embodied in the "scheduling domains" data +structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler +attempts to minimize task migration to distant scheduling domains. However, +the scheduler does not take a task's NUMA footprint into account directly. +Thus, under sufficient imbalance, tasks can migrate between nodes, remote +from their initial node and kernel data structures. + +System administrators and application designers can restrict a task's migration +to improve NUMA locality using various CPU affinity command line interfaces, +such as taskset(1) and numactl(1), and program interfaces such as +sched_setaffinity(2). 
Further, one can modify the kernel's default local +allocation behavior using Linux NUMA memory policy. +[see Documentation/vm/numa_memory_policy.] + +System administrators can restrict the CPUs and nodes' memories that a non- +privileged user can specify in the scheduling or NUMA commands and functions +using control groups and CPUsets. [see Documentation/cgroups/cpusets.txt] + +On architectures that do not hide memoryless nodes, Linux will include only +zones [nodes] with memory in the zonelists. This means that for a memoryless +node the "local memory node"--the node of the first zone in CPU's node's +zonelist--will not be the node itself. Rather, it will be the node that the +kernel selected as the nearest node with memory when it built the zonelists. +So, default, local allocations will succeed with the kernel supplying the +closest available memory. This is a consequence of the same mechanism that +allows such allocations to fall back to other nearby nodes when a node that +does contain memory overflows. + +Some kernel allocations do not want or cannot tolerate this allocation fallback +behavior. Rather they want to be sure they get memory from the specified node +or get notified that the node has no free memory. This is usually the case when +a subsystem allocates per CPU memory resources, for example. + +A typical model for making such an allocation is to obtain the node id of the +node to which the "current CPU" is attached using one of the kernel's +numa_node_id() or cpu_to_node() functions and then request memory from only +the node id returned. When such an allocation fails, the requesting subsystem +may revert to its own fallback path. The slab kernel memory allocator is an +example of this. Or, the subsystem may choose to disable or not to enable +itself on allocation failure. The kernel profiling subsystem is an example of +this. + +If the architecture supports--does not hide--memoryless nodes, then CPUs +attached to memoryless nodes would always incur the fallback path overhead +or some subsystems would fail to initialize if they attempted to allocate +memory exclusively from a node without memory. To support such +architectures transparently, kernel subsystems can use the numa_mem_id() +or cpu_to_mem() function to locate the "local memory node" for the calling or +specified CPU. Again, this is the same node from which default, local page +allocations will be attempted. 
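To make the allocation pattern above concrete, here is a minimal sketch (not taken from the patch; my_alloc_buffer() is a hypothetical helper): request memory from the nearest node that actually has memory, and revert to an ordinary allocation if that node cannot satisfy the request.

	#include <linux/gfp.h>
	#include <linux/slab.h>
	#include <linux/topology.h>

	static void *my_alloc_buffer(size_t size)
	{
		int nid = numa_mem_id();	/* nearest node with memory */
		void *buf;

		/* __GFP_THISNODE suppresses the zonelist fallback described above */
		buf = kmalloc_node(size, GFP_KERNEL | __GFP_THISNODE, nid);
		if (buf)
			return buf;

		/* subsystem-private fallback: accept memory from any node */
		return kmalloc(size, GFP_KERNEL);
	}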
diff --git a/MAINTAINERS b/MAINTAINERS index 18355cc1ce26..2652ebc5ab40 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2887,6 +2887,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git S: Maintained F: drivers/input/ +INTEL IDLE DRIVER +M: Len Brown +L: linux-pm@lists.linux-foundation.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git +S: Supported +F: drivers/idle/intel_idle.c + INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) M: Maik Broemme L: linux-fbdev@vger.kernel.org @@ -3235,7 +3242,7 @@ L: autofs@linux.kernel.org S: Maintained F: fs/autofs4/ -KERNEL BUILD +KERNEL BUILD + files below scripts/ (unless maintained elsewhere) M: Michal Marek T: git git://repo.or.cz/linux-kbuild.git for-next T: git git://repo.or.cz/linux-kbuild.git for-linus @@ -3244,6 +3251,9 @@ S: Maintained F: Documentation/kbuild/ F: Makefile F: scripts/Makefile.* +F: scripts/basic/ +F: scripts/mk* +F: scripts/package/ KERNEL JANITORS L: kernel-janitors@vger.kernel.org @@ -4836,6 +4846,9 @@ W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported F: arch/s390/ F: drivers/s390/ +F: fs/partitions/ibm.c +F: Documentation/s390/ +F: Documentation/DocBook/s390* S390 NETWORK DRIVERS M: Ursula Braun @@ -5004,6 +5017,12 @@ L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/sdhci-s3c.c +SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER +M: Viresh Kumar +L: linux-mmc@vger.kernel.org +S: Maintained +F: drivers/mmc/host/sdhci-spear.c + SECURITY SUBSYSTEM M: James Morris L: linux-security-module@vger.kernel.org (suggested Cc:) diff --git a/Makefile b/Makefile index ebc8225f7a96..efdc3d0d8e60 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 -SUBLEVEL = 34 -EXTRAVERSION = +SUBLEVEL = 35 +EXTRAVERSION = -rc1 NAME = Sheep on Meth # *DOCUMENTATION* @@ -183,11 +183,14 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ # CROSS_COMPILE can be set on the command line # make CROSS_COMPILE=ia64-linux- # Alternatively CROSS_COMPILE can be set in the environment. +# A third alternative is to store a setting in .config so that plain +# "make" in the configured kernel build directory always uses that. 
# Default value for CROSS_COMPILE is not to prefix executables # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile export KBUILD_BUILDHOST := $(SUBARCH) ARCH ?= $(SUBARCH) CROSS_COMPILE ?= +CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%) # Architecture as present in compile.h UTS_MACHINE := $(ARCH) @@ -576,9 +579,6 @@ KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) -# revert to pre-gcc-4.4 behaviour of .eh_frame -KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) - # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) @@ -882,9 +882,6 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; PHONY += $(vmlinux-dirs) $(vmlinux-dirs): prepare scripts $(Q)$(MAKE) $(build)=$@ -ifdef CONFIG_MODULES - $(Q)$(MAKE) $(modbuiltin)=$@ -endif # Build the kernel release string # @@ -907,14 +904,19 @@ endif # $(localver) # localversion* (files without backups, containing '~') # $(CONFIG_LOCALVERSION) (from kernel config setting) -# $(localver-auto) (only if CONFIG_LOCALVERSION_AUTO is set) -# ./scripts/setlocalversion (SCM tag, if one exists) -# $(LOCALVERSION) (from make command line if provided) +# $(LOCALVERSION) (from make command line, if provided) +# $(localver-extra) +# $(scm-identifier) (unique SCM tag, if one exists) +# ./scripts/setlocalversion (only with CONFIG_LOCALVERSION_AUTO) +# .scmversion (only with CONFIG_LOCALVERSION_AUTO) +# + (only without CONFIG_LOCALVERSION_AUTO +# and without LOCALVERSION= and +# repository is at non-tagged commit) # -# Note how the final $(localver-auto) string is included *only* if the -# kernel config option CONFIG_LOCALVERSION_AUTO is selected. Also, at the -# moment, only git is supported but other SCMs can edit the script -# scripts/setlocalversion and add the appropriate checks as needed. +# For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has +# been revised beyond a tagged commit, `+' is appended to the version string +# when not overridden by using "make LOCALVERSION=". This indicates that the +# kernel is not a vanilla release version and has been modified. pattern = ".*/localversion[^~]*" string = $(shell cat /dev/null \ @@ -923,26 +925,32 @@ string = $(shell cat /dev/null \ localver = $(subst $(space),, $(string) \ $(patsubst "%",%,$(CONFIG_LOCALVERSION))) -# If CONFIG_LOCALVERSION_AUTO is set scripts/setlocalversion is called -# and if the SCM is know a tag from the SCM is appended. -# The appended tag is determined by the SCM used. +# scripts/setlocalversion is called to create a unique identifier if the source +# is managed by a known SCM and the repository has been revised since the last +# tagged (release) commit. The format of the identifier is determined by the +# SCM's implementation. 
# # .scmversion is used when generating rpm packages so we do not loose # the version information from the SCM when we do the build of the kernel # from the copied source -ifdef CONFIG_LOCALVERSION_AUTO - ifeq ($(wildcard .scmversion),) - _localver-auto = $(shell $(CONFIG_SHELL) \ + scm-identifier = $(shell $(CONFIG_SHELL) \ $(srctree)/scripts/setlocalversion $(srctree)) else - _localver-auto = $(shell cat .scmversion 2> /dev/null) + scm-identifier = $(shell cat .scmversion 2> /dev/null) endif - localver-auto = $(LOCALVERSION)$(_localver-auto) +ifdef CONFIG_LOCALVERSION_AUTO + localver-extra = $(scm-identifier) +else + ifneq ($(scm-identifier),) + ifeq ($(LOCALVERSION),) + localver-extra = + + endif + endif endif -localver-full = $(localver)$(localver-auto) +localver-full = $(localver)$(LOCALVERSION)$(localver-extra) # Store (new) KERNELRELASE string in include/config/kernel.release kernelrelease = $(KERNELVERSION)$(localver-full) @@ -1089,11 +1097,16 @@ all: modules PHONY += modules modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order - $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.builtin) > $(objtree)/modules.builtin @$(kecho) ' Building modules, stage 2.'; $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_modbuild +modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) + $(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin + +%/modules.builtin: include/config/auto.conf + $(Q)$(MAKE) $(modbuiltin)=$* + # Target to prepare building external modules PHONY += modules_prepare @@ -1104,7 +1117,7 @@ PHONY += modules_install modules_install: _modinst_ _modinst_post PHONY += _modinst_ -_modinst_: +_modinst_: modules.builtin @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \ echo "Warning: you may need to install module-init-tools"; \ echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\ @@ -1247,7 +1260,9 @@ help: @echo ' firmware_install- Install all firmware to INSTALL_FW_PATH' @echo ' (default: $$(INSTALL_MOD_PATH)/lib/firmware)' @echo ' dir/ - Build all files in dir and below' - @echo ' dir/file.[ois] - Build specified target only' + @echo ' dir/file.[oisS] - Build specified target only' + @echo ' dir/file.lst - Build specified mixed source/assembly target only' + @echo ' (requires a recent binutils and recent build (System.map))' @echo ' dir/file.ko - Build module including final link' @echo ' modules_prepare - Set up for building external modules' @echo ' tags/TAGS - Generate tags file for editors' diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 24efdfe277fc..3e2e540a0f2a 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -61,6 +61,9 @@ config ZONE_DMA config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config GENERIC_ISA_DMA bool default y diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h index 440747ca6349..5728c52a7412 100644 --- a/arch/alpha/include/asm/scatterlist.h +++ b/arch/alpha/include/asm/scatterlist.h @@ -1,24 +1,7 @@ #ifndef _ALPHA_SCATTERLIST_H #define _ALPHA_SCATTERLIST_H -#include -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - - unsigned int length; - - dma_addr_t dma_address; - __u32 dma_length; -}; - -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) 
((sg)->dma_length) +#include #define ISA_DMA_THRESHOLD (~0UL) diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig index 9236475e7131..44cea2ddd22b 100644 --- a/arch/arm/configs/s3c2410_defconfig +++ b/arch/arm/configs/s3c2410_defconfig @@ -1,12 +1,14 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:17:31 2010 +# Fri May 28 19:15:48 2010 # CONFIG_ARM=y CONFIG_HAVE_PWM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -186,9 +189,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -224,7 +229,7 @@ CONFIG_ARCH_S3C2410=y # CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_S5P6440 is not set # CONFIG_ARCH_S5P6442 is not set -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set # CONFIG_ARCH_S5PV210 is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -233,6 +238,7 @@ CONFIG_ARCH_S3C2410=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -243,11 +249,18 @@ CONFIG_S3C_BOOT_ERROR_RESET=y CONFIG_S3C_BOOT_UART_FORCE_FIFO=y CONFIG_S3C_LOWLEVEL_UART_PORT=0 CONFIG_SAMSUNG_CLKSRC=y +CONFIG_S3C_GPIO_CFG_S3C24XX=y +CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S3C_GPIO_PULL_UP=y CONFIG_SAMSUNG_GPIO_EXTRA=0 CONFIG_S3C_GPIO_SPACE=0 CONFIG_S3C_ADC=y CONFIG_S3C_DEV_HSMMC=y +CONFIG_S3C_DEV_HSMMC1=y +CONFIG_S3C_DEV_HWMON=y +CONFIG_S3C_DEV_FB=y CONFIG_S3C_DEV_USB_HOST=y +CONFIG_S3C_DEV_WDT=y CONFIG_S3C_DEV_NAND=y CONFIG_S3C_DMA=y @@ -260,6 +273,7 @@ CONFIG_PLAT_S3C24XX=y CONFIG_CPU_LLSERIAL_S3C2410=y CONFIG_CPU_LLSERIAL_S3C2440=y CONFIG_S3C2410_CLOCK=y +CONFIG_S3C2443_CLOCK=y CONFIG_S3C24XX_DCLK=y CONFIG_S3C24XX_PWM=y CONFIG_S3C24XX_GPIO_EXTRA=128 @@ -270,6 +284,7 @@ CONFIG_S3C2410_DMA=y # CONFIG_S3C2410_DMA_DEBUG is not set CONFIG_MACH_SMDK=y CONFIG_S3C24XX_SIMTEC_AUDIO=y +CONFIG_S3C2410_SETUP_TS=y # # S3C2400 Machines @@ -289,6 +304,7 @@ CONFIG_ARCH_H1940=y # CONFIG_H1940BT is not set CONFIG_PM_H1940=y CONFIG_MACH_N30=y +CONFIG_MACH_N35=y CONFIG_ARCH_BAST=y CONFIG_MACH_OTOM=y CONFIG_MACH_AML_M5900=y @@ -309,6 +325,13 @@ CONFIG_MACH_SMDK2413=y CONFIG_MACH_S3C2413=y CONFIG_MACH_SMDK2412=y CONFIG_MACH_VSTMS=y +CONFIG_CPU_S3C2416=y +CONFIG_S3C2416_DMA=y + +# +# S3C2416 Machines +# +CONFIG_MACH_SMDK2416=y CONFIG_CPU_S3C2440=y CONFIG_CPU_S3C2442=y CONFIG_CPU_S3C244X=y @@ -320,9 +343,9 @@ CONFIG_S3C2440_DMA=y # S3C2440 and S3C2442 Machines # CONFIG_MACH_ANUBIS=y -# CONFIG_MACH_NEO1973_GTA02 is not set +CONFIG_MACH_NEO1973_GTA02=y CONFIG_MACH_OSIRIS=y -# CONFIG_MACH_OSIRIS_DVS is not set +CONFIG_MACH_OSIRIS_DVS=m CONFIG_MACH_RX3715=y CONFIG_ARCH_S3C2440=y CONFIG_MACH_NEXCODER_2440=y @@ -330,6 +353,7 @@ CONFIG_SMDK2440_CPU2440=y CONFIG_SMDK2440_CPU2442=y CONFIG_MACH_AT2440EVB=y CONFIG_MACH_MINI2440=y 
+CONFIG_MACH_RX1950=y CONFIG_CPU_S3C2443=y CONFIG_S3C2443_DMA=y @@ -410,6 +434,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="root=/dev/hda1 ro init=/bin/bash console=ttySAC0" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -509,7 +534,9 @@ CONFIG_TCP_CONG_ILLINOIS=m # CONFIG_DEFAULT_BIC is not set CONFIG_DEFAULT_CUBIC=y # CONFIG_DEFAULT_HTCP is not set +# CONFIG_DEFAULT_HYBLA is not set # CONFIG_DEFAULT_VEGAS is not set +# CONFIG_DEFAULT_VENO is not set # CONFIG_DEFAULT_WESTWOOD is not set # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" @@ -566,6 +593,16 @@ CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m # CONFIG_NETFILTER_TPROXY is not set CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m + +# +# Xtables targets +# CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m # CONFIG_NETFILTER_XT_TARGET_CT is not set @@ -577,9 +614,14 @@ CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m # CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set CONFIG_NETFILTER_XT_TARGET_RATEEST=m +# CONFIG_NETFILTER_XT_TARGET_TEE is not set # CONFIG_NETFILTER_XT_TARGET_TRACE is not set CONFIG_NETFILTER_XT_TARGET_TCPMSS=m # CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m @@ -598,6 +640,7 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m CONFIG_NETFILTER_XT_MATCH_MARK=m CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +# CONFIG_NETFILTER_XT_MATCH_OSF is not set CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m @@ -605,7 +648,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -# CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT is not set CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -613,7 +655,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -# CONFIG_NETFILTER_XT_MATCH_OSF is not set CONFIG_IP_VS=m # CONFIG_IP_VS_IPV6 is not set # CONFIG_IP_VS_DEBUG is not set @@ -713,6 +754,7 @@ CONFIG_IP6_NF_RAW=m # CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set +# CONFIG_L2TP is not set # CONFIG_BRIDGE is not set # CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set @@ -739,6 +781,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_IRDA is not set CONFIG_BT=m CONFIG_BT_L2CAP=m +# CONFIG_BT_L2CAP_EXT_FEATURES is not set CONFIG_BT_SCO=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y @@ -775,6 +818,7 @@ CONFIG_CFG80211_WEXT=y CONFIG_WIRELESS_EXT_SYSFS=y # CONFIG_LIB80211 is not set CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y # CONFIG_MAC80211_RC_DEFAULT_PID is not set CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y @@ -785,6 +829,7 @@ CONFIG_MAC80211_LEDS=y # CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set +# CONFIG_CAIF is not set # # Device Drivers @@ -828,6 +873,7 @@ CONFIG_MTD_BLOCK=y # CONFIG_INFTL is not set # CONFIG_RFD_FTL is not set # CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set # CONFIG_MTD_OOPS is not set # @@ -882,9 +928,12 @@ CONFIG_MTD_ROM=y # CONFIG_MTD_DOC2001 is not set # CONFIG_MTD_DOC2001PLUS 
is not set CONFIG_MTD_NAND=y -# CONFIG_MTD_NAND_VERIFY_WRITE is not set +CONFIG_MTD_NAND_ECC=y # CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_SM_COMMON is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018 # CONFIG_MTD_NAND_GPIO is not set CONFIG_MTD_NAND_IDS=y CONFIG_MTD_NAND_S3C2410=y @@ -1149,6 +1198,7 @@ CONFIG_KEYBOARD_ATKBD=y # CONFIG_QT2160 is not set # CONFIG_KEYBOARD_LKKBD is not set # CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_TCA6416 is not set # CONFIG_KEYBOARD_MATRIX is not set # CONFIG_KEYBOARD_LM8323 is not set # CONFIG_KEYBOARD_MAX7359 is not set @@ -1212,6 +1262,7 @@ CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_AD7879_SPI is not set # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_EETI is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_S3C2410 is not set @@ -1248,6 +1299,7 @@ CONFIG_TOUCHSCREEN_USB_NEXIO=y # CONFIG_TOUCHSCREEN_TSC2007 is not set # CONFIG_TOUCHSCREEN_W90X900 is not set CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set CONFIG_INPUT_ATI_REMOTE=m CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m @@ -1255,6 +1307,8 @@ CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF50633_PMU is not set +# CONFIG_INPUT_PCF8574 is not set CONFIG_INPUT_GPIO_ROTARY_ENCODER=m # @@ -1287,6 +1341,7 @@ CONFIG_SERIAL_NONSTANDARD=y # CONFIG_MOXA_INTELLIO is not set # CONFIG_MOXA_SMARTIO is not set # CONFIG_N_HDLC is not set +# CONFIG_N_GSM is not set # CONFIG_RISCOM8 is not set # CONFIG_SPECIALIX is not set # CONFIG_STALDRV is not set @@ -1324,6 +1379,8 @@ CONFIG_SERIAL_S3C2440=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -1439,7 +1496,16 @@ CONFIG_GPIOLIB=y # AC97 GPIO expanders: # # CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +# CONFIG_PDA_POWER is not set +# CONFIG_APM_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_BATTERY_DS2760 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_BQ27x00 is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_CHARGER_PCF50633 is not set CONFIG_HWMON=y CONFIG_HWMON_VID=m # CONFIG_HWMON_DEBUG_CHIP is not set @@ -1499,6 +1565,7 @@ CONFIG_SENSORS_LM85=m # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set # CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set @@ -1555,7 +1622,7 @@ CONFIG_MFD_SM501=y # CONFIG_HTC_PASIC3 is not set # CONFIG_HTC_I2CPLD is not set # CONFIG_UCB1400_CORE is not set -# CONFIG_TPS65010 is not set +CONFIG_TPS65010=m # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_T7L66XB is not set @@ -1568,8 +1635,10 @@ CONFIG_MFD_SM501=y # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_PCF50633 is not set +CONFIG_MFD_PCF50633=y # CONFIG_MFD_MC13783 is not set +# CONFIG_PCF50633_ADC is not set +CONFIG_PCF50633_GPIO=y # CONFIG_AB3100_CORE is not set # CONFIG_EZX_PCAP is not set # CONFIG_AB4500_CORE is not 
set @@ -1685,6 +1754,7 @@ CONFIG_SND_S3C24XX_SOC_I2S=y CONFIG_SND_S3C_I2SV2_SOC=m CONFIG_SND_S3C2412_SOC_I2S=m CONFIG_SND_S3C_SOC_AC97=m +# CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753 is not set CONFIG_SND_S3C24XX_SOC_JIVE_WM8750=m CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710=m CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650=m @@ -1836,10 +1906,12 @@ CONFIG_USB_SERIAL_PL2303=y # CONFIG_USB_SERIAL_TI is not set # CONFIG_USB_SERIAL_CYBERJACK is not set # CONFIG_USB_SERIAL_XIRCOM is not set +CONFIG_USB_SERIAL_WWAN=m CONFIG_USB_SERIAL_OPTION=m # CONFIG_USB_SERIAL_OMNINET is not set # CONFIG_USB_SERIAL_OPTICON is not set # CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set +# CONFIG_USB_SERIAL_ZIO is not set # CONFIG_USB_SERIAL_DEBUG is not set # @@ -1991,6 +2063,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_RP5C01 is not set # CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_PCF50633 is not set # # on-CPU RTC drivers @@ -1999,10 +2072,6 @@ CONFIG_RTC_DRV_S3C=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -2274,6 +2343,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -2284,6 +2354,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig index a3a9993e5cd0..2b642386f030 100644 --- a/arch/arm/configs/s3c6400_defconfig +++ b/arch/arm/configs/s3c6400_defconfig @@ -1,11 +1,14 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:17:32 2010 +# Fri May 28 19:05:39 2010 # CONFIG_ARM=y +CONFIG_HAVE_PWM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -34,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -179,9 +183,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -217,7 +223,7 @@ CONFIG_MMU=y CONFIG_ARCH_S3C64XX=y # CONFIG_ARCH_S5P6440 is not set # CONFIG_ARCH_S5P6442 is not set -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set # CONFIG_ARCH_S5PV210 is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -226,6 +232,7 @@ CONFIG_ARCH_S3C64XX=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -247,11 +254,17 @@ CONFIG_S3C_GPIO_TRACK=y # CONFIG_S3C_ADC is not set CONFIG_S3C_DEV_HSMMC=y CONFIG_S3C_DEV_HSMMC1=y +CONFIG_S3C_DEV_HSMMC2=y +CONFIG_S3C_DEV_HWMON=y CONFIG_S3C_DEV_I2C1=y 
CONFIG_S3C_DEV_FB=y CONFIG_S3C_DEV_USB_HOST=y CONFIG_S3C_DEV_USB_HSOTG=y +CONFIG_S3C_DEV_WDT=y CONFIG_S3C_DEV_NAND=y +CONFIG_S3C_DEV_RTC=y +CONFIG_SAMSUNG_DEV_ADC=y +CONFIG_SAMSUNG_DEV_TS=y CONFIG_S3C_DMA=y # @@ -260,7 +273,9 @@ CONFIG_S3C_DMA=y # CONFIG_SAMSUNG_PM_DEBUG is not set # CONFIG_S3C_PM_DEBUG_LED_SMDK is not set # CONFIG_SAMSUNG_PM_CHECK is not set +CONFIG_SAMSUNG_WAKEMASK=y CONFIG_PLAT_S3C64XX=y +CONFIG_CPU_S3C6400=y CONFIG_CPU_S3C6410=y CONFIG_S3C64XX_DMA=y CONFIG_S3C64XX_SETUP_SDHCI=y @@ -268,15 +283,18 @@ CONFIG_S3C64XX_SETUP_I2C0=y CONFIG_S3C64XX_SETUP_I2C1=y CONFIG_S3C64XX_SETUP_FB_24BPP=y CONFIG_S3C64XX_SETUP_SDHCI_GPIO=y -# CONFIG_MACH_SMDK6400 is not set -# CONFIG_MACH_ANW6410 is not set +CONFIG_MACH_SMDK6400=y +CONFIG_MACH_ANW6410=y CONFIG_MACH_SMDK6410=y CONFIG_SMDK6410_SD_CH0=y # CONFIG_SMDK6410_SD_CH1 is not set # CONFIG_SMDK6410_WM1190_EV1 is not set # CONFIG_SMDK6410_WM1192_EV1 is not set -# CONFIG_MACH_NCP is not set -# CONFIG_MACH_HMT is not set +CONFIG_MACH_NCP=y +CONFIG_MACH_HMT=y +CONFIG_MACH_SMARTQ=y +CONFIG_MACH_SMARTQ5=y +CONFIG_MACH_SMARTQ7=y # # Processor Type @@ -302,6 +320,7 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_411920 is not set CONFIG_ARM_VIC=y @@ -352,6 +371,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x51000000,6M ramdisk_size=6144" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -430,6 +450,7 @@ CONFIG_MTD=y # CONFIG_INFTL is not set # CONFIG_RFD_FTL is not set # CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set # CONFIG_MTD_OOPS is not set # @@ -460,6 +481,9 @@ CONFIG_MTD_CFI_I2=y # # Self-contained MTD device drivers # +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -472,9 +496,12 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_DOC2001 is not set # CONFIG_MTD_DOC2001PLUS is not set CONFIG_MTD_NAND=y -# CONFIG_MTD_NAND_VERIFY_WRITE is not set +CONFIG_MTD_NAND_ECC=y # CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_SM_COMMON is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR=0xFF108018 # CONFIG_MTD_NAND_GPIO is not set CONFIG_MTD_NAND_IDS=y CONFIG_MTD_NAND_S3C2410=y @@ -483,6 +510,7 @@ CONFIG_MTD_NAND_S3C2410=y # CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set # CONFIG_MTD_NAND_DISKONCHIP is not set # CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ALAUDA is not set # CONFIG_MTD_ONENAND is not set # @@ -503,6 +531,7 @@ CONFIG_BLK_DEV_LOOP=y # # DRBD disabled because PROC_FS, INET or CONNECTOR not selected # +# CONFIG_BLK_DEV_UB is not set CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=4096 @@ -516,12 +545,14 @@ CONFIG_MISC_DEVICES=y # CONFIG_ISL29003 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_DS1682 is not set +# CONFIG_TI_DAC7512 is not set # CONFIG_C2PORT is not set # # EEPROM support # CONFIG_EEPROM_AT24=y +# CONFIG_EEPROM_AT25 is not set # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_MAX6875 is not set # CONFIG_EEPROM_93CX6 is not set @@ -569,6 +600,7 @@ CONFIG_KEYBOARD_ATKBD=y # CONFIG_QT2160 is not set # CONFIG_KEYBOARD_LKKBD is not set # CONFIG_KEYBOARD_GPIO is not set +# 
CONFIG_KEYBOARD_TCA6416 is not set # CONFIG_KEYBOARD_MATRIX is not set # CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_NEWTON is not set @@ -635,9 +667,12 @@ CONFIG_SERIAL_SAMSUNG_UARTS=4 # CONFIG_SERIAL_SAMSUNG_DEBUG is not set CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_S3C6400=y +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -673,6 +708,7 @@ CONFIG_I2C_S3C2410=y # # CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set # # Other I2C/SMBus bus drivers @@ -682,7 +718,24 @@ CONFIG_I2C_S3C2410=y # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_SPI is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_BITBANG=m +CONFIG_SPI_GPIO=m +CONFIG_SPI_S3C64XX=m +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_DESIGNWARE is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set # # PPS support @@ -714,6 +767,9 @@ CONFIG_GPIOLIB=y # # SPI GPIO expanders: # +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set # # AC97 GPIO expanders: @@ -729,6 +785,7 @@ CONFIG_HWMON=y # # CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADCXX is not set # CONFIG_SENSORS_ADM1021 is not set # CONFIG_SENSORS_ADM1025 is not set # CONFIG_SENSORS_ADM1026 is not set @@ -750,6 +807,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set # CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set # CONFIG_SENSORS_LM73 is not set # CONFIG_SENSORS_LM75 is not set # CONFIG_SENSORS_LM77 is not set @@ -764,6 +822,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LTC4215 is not set # CONFIG_SENSORS_LTC4245 is not set # CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set @@ -775,6 +834,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set # CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set @@ -788,9 +848,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_LIS3_SPI is not set # CONFIG_SENSORS_LIS3_I2C is not set # CONFIG_THERMAL is not set # CONFIG_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y CONFIG_SSB_POSSIBLE=y # @@ -823,7 +885,10 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set # CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_AB4500_CORE is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -832,8 +897,47 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set -# CONFIG_FB is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set 
+CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_S3C=y +# CONFIG_FB_S3C_DEBUG_REGWRITE is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +CONFIG_LCD_LTV350QV=y +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_GENERIC=y +CONFIG_BACKLIGHT_PWM=y # # Display device support @@ -845,6 +949,8 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE is not set +# CONFIG_LOGO is not set CONFIG_SOUND=y CONFIG_SOUND_OSS_CORE=y CONFIG_SOUND_OSS_CORE_PRECLAIM=y @@ -873,10 +979,16 @@ CONFIG_SND_DRIVERS=y # CONFIG_SND_SERIAL_U16550 is not set # CONFIG_SND_MPU401 is not set CONFIG_SND_ARM=y +CONFIG_SND_SPI=y +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set CONFIG_SND_SOC=m CONFIG_SND_SOC_AC97_BUS=y CONFIG_SND_S3C24XX_SOC=m CONFIG_SND_S3C_SOC_AC97=m +# CONFIG_SND_S3C64XX_SOC_WM8580 is not set CONFIG_SND_SOC_SMDK_WM9713=m CONFIG_SND_SOC_I2C_AND_SPI=m # CONFIG_SND_SOC_ALL_CODECS is not set @@ -886,29 +998,197 @@ CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y # CONFIG_HIDRAW is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=y # CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set # # Special HID drivers # +# CONFIG_HID_3M_PCT is not set +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +# CONFIG_HID_CANDO is not set +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +# CONFIG_HID_PRODIKEYS is not set +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EGALAX is not set +CONFIG_HID_EZKEY=y +CONFIG_HID_KYE=y +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_TWINHAN is not set +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +CONFIG_HID_MICROSOFT=y +# CONFIG_HID_MOSART is not set +CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_QUANTA is not set +# CONFIG_HID_ROCCAT_KONE is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set +# CONFIG_HID_STANTUM is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y # CONFIG_USB_ARCH_HAS_EHCI is not set -# CONFIG_USB is not set +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y # 
-# Enable Host or Gadget support to see Inventra options +# Miscellaneous USB options # +CONFIG_USB_DEVICEFS=y +CONFIG_USB_DEVICE_CLASS=y +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_MON is not set +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +CONFIG_USB_OHCI_HCD=y +# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set +# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set +# CONFIG_USB_MUSB_HDRC is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set # # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # + +# +# also be needed; see USB_STORAGE Help for more info +# +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=m +# CONFIG_USB_EZUSB is not set +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_AIRCABLE is not set +# CONFIG_USB_SERIAL_ARK3116 is not set +# CONFIG_USB_SERIAL_BELKIN is not set +# CONFIG_USB_SERIAL_CH341 is not set +# CONFIG_USB_SERIAL_WHITEHEAT is not set +# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set +# CONFIG_USB_SERIAL_CP210X is not set +# CONFIG_USB_SERIAL_CYPRESS_M8 is not set +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +# CONFIG_USB_SERIAL_FUNSOFT is not set +# CONFIG_USB_SERIAL_VISOR is not set +# CONFIG_USB_SERIAL_IPAQ is not set +# CONFIG_USB_SERIAL_IR is not set +# CONFIG_USB_SERIAL_EDGEPORT is not set +# CONFIG_USB_SERIAL_EDGEPORT_TI is not set +# CONFIG_USB_SERIAL_GARMIN is not set +# CONFIG_USB_SERIAL_IPW is not set +# CONFIG_USB_SERIAL_IUU is not set +# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set +# CONFIG_USB_SERIAL_KEYSPAN is not set +# CONFIG_USB_SERIAL_KLSI is not set +# CONFIG_USB_SERIAL_KOBIL_SCT is not set +# CONFIG_USB_SERIAL_MCT_U232 is not set +# CONFIG_USB_SERIAL_MOS7720 is not set +# CONFIG_USB_SERIAL_MOS7840 is not set +# CONFIG_USB_SERIAL_MOTOROLA is not set +# CONFIG_USB_SERIAL_NAVMAN is not set +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_OTI6858 is not set +# CONFIG_USB_SERIAL_QCAUX is not set +# CONFIG_USB_SERIAL_QUALCOMM is not set +# CONFIG_USB_SERIAL_SPCP8X5 is not set +# CONFIG_USB_SERIAL_HP4X is not set +# CONFIG_USB_SERIAL_SAFE is not set +# CONFIG_USB_SERIAL_SIEMENS_MPI is not set +# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set +# CONFIG_USB_SERIAL_SYMBOL is not set +# CONFIG_USB_SERIAL_TI is not set +# CONFIG_USB_SERIAL_CYBERJACK is not set +# CONFIG_USB_SERIAL_XIRCOM is not set +# CONFIG_USB_SERIAL_OPTION is not set +# CONFIG_USB_SERIAL_OMNINET is not set +# CONFIG_USB_SERIAL_OPTICON is not set +# CONFIG_USB_SERIAL_VIVOPAY_SERIAL is not set +# CONFIG_USB_SERIAL_ZIO is not set +# CONFIG_USB_SERIAL_DEBUG is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not 
set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_ISIGHTFW is not set # CONFIG_USB_GADGET is not set # # OTG and related infrastructure # +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ULPI is not set +# CONFIG_NOP_USB_XCEIV is not set CONFIG_MMC=y CONFIG_MMC_DEBUG=y CONFIG_MMC_UNSAFE_RESUME=y @@ -928,18 +1208,80 @@ CONFIG_MMC_SDHCI=y # CONFIG_MMC_SDHCI_PLTFM is not set CONFIG_MMC_SDHCI_S3C=y # CONFIG_MMC_SDHCI_S3C_DMA is not set +# CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set # CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y -# CONFIG_RTC_CLASS is not set +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_S3C=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -1033,7 +1375,46 @@ CONFIG_ROMFS_ON_BLOCK=y # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -# CONFIG_NLS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# 
CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set # # Kernel hacking @@ -1096,6 +1477,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -1106,6 +1488,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/configs/s5p6440_defconfig b/arch/arm/configs/s5p6440_defconfig index 619bfab3ab39..532e987beb4d 100644 --- a/arch/arm/configs/s5p6440_defconfig +++ b/arch/arm/configs/s5p6440_defconfig @@ -1,11 +1,13 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:18:18 2010 +# Wed May 26 19:04:32 2010 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -33,6 +35,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -178,9 +181,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -216,7 +221,7 @@ CONFIG_MMU=y # CONFIG_ARCH_S3C64XX is not set CONFIG_ARCH_S5P6440=y # CONFIG_ARCH_S5P6442 is not set -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set # CONFIG_ARCH_S5PV210 is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -225,6 +230,7 @@ CONFIG_ARCH_S5P6440=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -240,10 +246,15 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y CONFIG_S3C_GPIO_CFG_S3C24XX=y CONFIG_S3C_GPIO_CFG_S3C64XX=y CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S5P_GPIO_DRVSTR=y CONFIG_SAMSUNG_GPIO_EXTRA=0 CONFIG_S3C_GPIO_SPACE=0 CONFIG_S3C_GPIO_TRACK=y # CONFIG_S3C_ADC is not set +CONFIG_S3C_DEV_WDT=y +CONFIG_SAMSUNG_DEV_ADC=y +CONFIG_SAMSUNG_DEV_TS=y +CONFIG_S3C_PL330_DMA=y # # Power management @@ -276,10 +287,12 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_411920 is not set CONFIG_ARM_VIC=y CONFIG_ARM_VIC_NR=2 +CONFIG_PL330=y # # Bus support @@ -326,6 +339,7 @@ 
CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -490,7 +504,9 @@ CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_S3C2410 is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set # CONFIG_TOUCHSCREEN_WACOM_W8001 is not set @@ -546,6 +562,8 @@ CONFIG_SERIAL_S3C6400=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -593,6 +611,7 @@ CONFIG_GPIOLIB=y # CONFIG_HWMON is not set # CONFIG_THERMAL is not set # CONFIG_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y CONFIG_SSB_POSSIBLE=y # @@ -649,10 +668,6 @@ CONFIG_RTC_LIB=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -850,6 +865,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -860,6 +876,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/configs/s5p6442_defconfig b/arch/arm/configs/s5p6442_defconfig index d7ea27509cf4..068219b360f5 100644 --- a/arch/arm/configs/s5p6442_defconfig +++ b/arch/arm/configs/s5p6442_defconfig @@ -1,11 +1,13 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:18:19 2010 +# Wed May 26 19:04:34 2010 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -33,6 +35,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -178,9 +181,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -216,7 +221,7 @@ CONFIG_MMU=y # CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_S5P6440 is not set CONFIG_ARCH_S5P6442=y -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set # CONFIG_ARCH_S5PV210 is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -225,6 +230,7 @@ CONFIG_ARCH_S5P6442=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -240,10 +246,12 
@@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y CONFIG_S3C_GPIO_CFG_S3C24XX=y CONFIG_S3C_GPIO_CFG_S3C64XX=y CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S5P_GPIO_DRVSTR=y CONFIG_SAMSUNG_GPIO_EXTRA=0 CONFIG_S3C_GPIO_SPACE=0 CONFIG_S3C_GPIO_TRACK=y # CONFIG_S3C_ADC is not set +CONFIG_S3C_PL330_DMA=y # # Power management @@ -276,10 +284,12 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_ARM_L1_CACHE_SHIFT=5 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_411920 is not set CONFIG_ARM_VIC=y CONFIG_ARM_VIC_NR=2 +CONFIG_PL330=y # # Bus support @@ -326,6 +336,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -471,6 +482,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set @@ -525,6 +537,8 @@ CONFIG_SERIAL_S5PV210=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -624,10 +638,6 @@ CONFIG_RTC_LIB=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -836,6 +846,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -846,6 +857,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/configs/s5pc100_defconfig b/arch/arm/configs/s5pc100_defconfig index 2053be6c9af1..ebc6245b9fca 100644 --- a/arch/arm/configs/s5pc100_defconfig +++ b/arch/arm/configs/s5pc100_defconfig @@ -1,12 +1,14 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.30 -# Wed Jul 1 15:53:07 2009 +# Linux kernel version: 2.6.34 +# Wed May 26 19:04:35 2010 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y -CONFIG_MMU=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y +CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_STACKTRACE_SUPPORT=y @@ -18,7 +20,9 @@ CONFIG_GENERIC_IRQ_PROBE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_ARM_L1_CACHE_SHIFT_6=y CONFIG_VECTORS_BASE=0xffff0000 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_CONSTRUCTORS=y @@ -31,6 +35,13 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_LZO is not set CONFIG_SWAP=y # CONFIG_SYSVIPC 
is not set # CONFIG_BSD_PROCESS_ACCT is not set @@ -38,14 +49,15 @@ CONFIG_SWAP=y # # RCU Subsystem # -CONFIG_CLASSIC_RCU=y -# CONFIG_TREE_RCU is not set -# CONFIG_PREEMPT_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_TINY_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set # CONFIG_TREE_RCU_TRACE is not set -# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=17 -# CONFIG_GROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -59,6 +71,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y CONFIG_RD_BZIP2=y CONFIG_RD_LZMA=y +CONFIG_RD_LZO=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -80,19 +93,21 @@ CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y CONFIG_AIO=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y # -# Performance Counters +# Kernel Performance Events And Counters # +# CONFIG_PERF_EVENTS is not set +# CONFIG_PERF_COUNTERS is not set CONFIG_VM_EVENT_COUNTERS=y CONFIG_SLUB_DEBUG=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_COMPAT_BRK=y # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y # CONFIG_KPROBES is not set CONFIG_HAVE_KPROBES=y @@ -122,25 +137,56 @@ CONFIG_LBDAF=y # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set # CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_UNLOCK is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_READ_TRYLOCK is not set +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_TRYLOCK is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +# CONFIG_MUTEX_SPIN_ON_OWNER is not set # CONFIG_FREEZER is not set # # System Type # +CONFIG_MMU=y # CONFIG_ARCH_AAEC2000 is not set # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -156,6 +202,7 @@ CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_ARCH_IXP2000 is not set # CONFIG_ARCH_IXP4XX is not set # CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_DOVE is not set # 
CONFIG_ARCH_KIRKWOOD is not set # CONFIG_ARCH_LOKI is not set # CONFIG_ARCH_MV78XX0 is not set @@ -164,39 +211,64 @@ CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_ARCH_KS8695 is not set # CONFIG_ARCH_NS9XXX is not set # CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_NUC93X is not set # CONFIG_ARCH_PNX4008 is not set # CONFIG_ARCH_PXA is not set # CONFIG_ARCH_MSM is not set +# CONFIG_ARCH_SHMOBILE is not set # CONFIG_ARCH_RPC is not set # CONFIG_ARCH_SA1100 is not set # CONFIG_ARCH_S3C2410 is not set # CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_S5P6440 is not set +# CONFIG_ARCH_S5P6442 is not set CONFIG_ARCH_S5PC100=y +# CONFIG_ARCH_S5PV210 is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set # CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set -CONFIG_PLAT_S3C=y +# CONFIG_PLAT_SPEAR is not set +CONFIG_PLAT_SAMSUNG=y # # Boot options # # CONFIG_S3C_BOOT_ERROR_RESET is not set CONFIG_S3C_BOOT_UART_FORCE_FIFO=y +CONFIG_S3C_LOWLEVEL_UART_PORT=0 +CONFIG_SAMSUNG_CLKSRC=y +CONFIG_SAMSUNG_IRQ_VIC_TIMER=y +CONFIG_SAMSUNG_IRQ_UART=y +CONFIG_SAMSUNG_GPIOLIB_4BIT=y +CONFIG_S3C_GPIO_CFG_S3C24XX=y +CONFIG_S3C_GPIO_CFG_S3C64XX=y +CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S5P_GPIO_DRVSTR=y +CONFIG_SAMSUNG_GPIO_EXTRA=0 +CONFIG_S3C_GPIO_SPACE=0 +CONFIG_S3C_GPIO_TRACK=y +# CONFIG_S3C_ADC is not set +CONFIG_S3C_DEV_HSMMC=y +CONFIG_S3C_DEV_HSMMC1=y +CONFIG_S3C_DEV_HSMMC2=y +CONFIG_S3C_DEV_I2C1=y +CONFIG_S3C_DEV_FB=y +CONFIG_S3C_PL330_DMA=y # # Power management # -CONFIG_S3C_LOWLEVEL_UART_PORT=0 -CONFIG_S3C_GPIO_SPACE=0 -CONFIG_S3C_GPIO_TRACK=y -CONFIG_S3C_GPIO_PULL_UPDOWN=y -CONFIG_PLAT_S5PC1XX=y -CONFIG_CPU_S5PC100_INIT=y -CONFIG_CPU_S5PC100_CLOCK=y -CONFIG_S5PC100_SETUP_I2C0=y +CONFIG_PLAT_S5P=y +CONFIG_S5P_EXT_INT=y CONFIG_CPU_S5PC100=y +CONFIG_S5PC100_SETUP_FB_24BPP=y +CONFIG_S5PC100_SETUP_I2C1=y +CONFIG_S5PC100_SETUP_SDHCI=y +CONFIG_S5PC100_SETUP_SDHCI_GPIO=y CONFIG_MACH_SMDKC100=y # @@ -206,7 +278,7 @@ CONFIG_CPU_32v6K=y CONFIG_CPU_V7=y CONFIG_CPU_32v7=y CONFIG_CPU_ABRT_EV7=y -CONFIG_CPU_PABRT_IFAR=y +CONFIG_CPU_PABRT_V7=y CONFIG_CPU_CACHE_V7=y CONFIG_CPU_CACHE_VIPT=y CONFIG_CPU_COPY_V6=y @@ -224,11 +296,15 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_HAS_TLS_REG=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_430973 is not set # CONFIG_ARM_ERRATA_458693 is not set # CONFIG_ARM_ERRATA_460075 is not set CONFIG_ARM_VIC=y CONFIG_ARM_VIC_NR=2 +CONFIG_PL330=y # # Bus support @@ -244,8 +320,11 @@ CONFIG_VMSPLIT_3G=y # CONFIG_VMSPLIT_2G is not set # CONFIG_VMSPLIT_1G is not set CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set CONFIG_HZ=100 +# CONFIG_THUMB2_KERNEL is not set CONFIG_AEABI=y CONFIG_OABI_COMPAT=y # CONFIG_ARCH_SPARSEMEM_DEFAULT is not set @@ -258,12 +337,11 @@ CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_SPLIT_PTLOCK_CPUS=999999 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y -CONFIG_HAVE_MLOCK=y -CONFIG_HAVE_MLOCKED_PAGE_BIT=y +# CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_ALIGNMENT_TRAP=y # CONFIG_UACCESS_WITH_MEMCPY is not set @@ -274,6 +352,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="root=/dev/mtdblock2 
rootfstype=cramfs init=/linuxrc console=ttySAC2,115200 mem=128M" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -317,6 +396,7 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y # Generic Driver Options # CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y @@ -331,6 +411,10 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_COW_COMMON is not set CONFIG_BLK_DEV_LOOP=y # CONFIG_BLK_DEV_CRYPTOLOOP is not set + +# +# DRBD disabled because PROC_FS, INET or CONNECTOR not selected +# CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 @@ -338,9 +422,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_CDROM_PKTCDVD is not set # CONFIG_MG_DISK is not set CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_ISL29003 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_DS1682 is not set # CONFIG_C2PORT is not set # @@ -350,18 +437,21 @@ CONFIG_EEPROM_AT24=y # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_MAX6875 is not set # CONFIG_EEPROM_93CX6 is not set +# CONFIG_IWMC3200TOP is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set # # SCSI device support # +CONFIG_SCSI_MOD=y # CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set # CONFIG_SCSI_DMA is not set # CONFIG_SCSI_NETLINK is not set # CONFIG_ATA is not set # CONFIG_MD is not set +# CONFIG_PHONE is not set # # Input device support @@ -369,6 +459,7 @@ CONFIG_HAVE_IDE=y CONFIG_INPUT=y # CONFIG_INPUT_FF_MEMLESS is not set # CONFIG_INPUT_POLLDEV is not set +# CONFIG_INPUT_SPARSEKMAP is not set # # Userland interfaces @@ -385,13 +476,19 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_QT2160 is not set # CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set # CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set CONFIG_INPUT_MOUSE=y CONFIG_MOUSE_PS2=y CONFIG_MOUSE_PS2_ALPS=y @@ -399,6 +496,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y CONFIG_MOUSE_PS2_SYNAPTICS=y CONFIG_MOUSE_PS2_TRACKPOINT=y # CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_SENTELIC is not set # CONFIG_MOUSE_PS2_TOUCHKIT is not set # CONFIG_MOUSE_SERIAL is not set # CONFIG_MOUSE_APPLETOUCH is not set @@ -418,6 +516,7 @@ CONFIG_SERIO=y CONFIG_SERIO_SERPORT=y CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set # CONFIG_GAMEPORT is not set # @@ -444,11 +543,16 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 # Non-8250 serial port support # CONFIG_SERIAL_SAMSUNG=y -CONFIG_SERIAL_SAMSUNG_UARTS=3 +CONFIG_SERIAL_SAMSUNG_UARTS_4=y +CONFIG_SERIAL_SAMSUNG_UARTS=4 # CONFIG_SERIAL_SAMSUNG_DEBUG is not set CONFIG_SERIAL_SAMSUNG_CONSOLE=y +CONFIG_SERIAL_S3C6400=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -461,6 +565,7 @@ CONFIG_HW_RANDOM=y # CONFIG_TCG_TPM is not set 
CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_HELPER_AUTO=y @@ -471,9 +576,11 @@ CONFIG_I2C_HELPER_AUTO=y # # I2C system bus drivers (mostly embedded / system-on-chip) # +# CONFIG_I2C_DESIGNWARE is not set # CONFIG_I2C_GPIO is not set # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -486,20 +593,15 @@ CONFIG_I2C_HELPER_AUTO=y # # CONFIG_I2C_PCA_PLATFORM is not set # CONFIG_I2C_STUB is not set - -# -# Miscellaneous I2C Chip support -# -# CONFIG_DS1682 is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_I2C_DEBUG_CHIP is not set # CONFIG_SPI is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -508,13 +610,16 @@ CONFIG_GPIOLIB=y # # Memory mapped GPIO expanders: # +# CONFIG_GPIO_IT8761E is not set # # I2C GPIO expanders: # +# CONFIG_GPIO_MAX7300 is not set # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -523,10 +628,19 @@ CONFIG_GPIOLIB=y # # SPI GPIO expanders: # + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# # CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set # CONFIG_SENSORS_ADM1021 is not set @@ -535,10 +649,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7473 is not set # CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set @@ -549,6 +664,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set # CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM73 is not set # CONFIG_SENSORS_LM75 is not set # CONFIG_SENSORS_LM77 is not set # CONFIG_SENSORS_LM78 is not set @@ -573,8 +689,10 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set # CONFIG_SENSORS_VT1211 is not set # CONFIG_SENSORS_W83781D is not set # CONFIG_SENSORS_W83791D is not set @@ -584,9 +702,8 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set -# CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_SENSORS_LIS3_I2C is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set CONFIG_SSB_POSSIBLE=y @@ -599,10 +716,12 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_ASIC3 is not set # CONFIG_HTC_EGPIO is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set # CONFIG_TPS65010 is not set # CONFIG_TWL4030_CORE is not set # 
CONFIG_MFD_TMIO is not set @@ -610,10 +729,15 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_TC6387XB is not set # CONFIG_MFD_TC6393XB is not set # CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set +# CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set # @@ -637,7 +761,6 @@ CONFIG_DUMMY_CONSOLE=y # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y -CONFIG_HID_DEBUG=y # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set @@ -680,13 +803,12 @@ CONFIG_SDIO_UART=y CONFIG_MMC_SDHCI=y # CONFIG_MMC_SDHCI_PLTFM is not set # CONFIG_MEMSTICK is not set -# CONFIG_ACCESSIBILITY is not set # CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set -# CONFIG_REGULATOR is not set # CONFIG_UIO is not set # CONFIG_STAGING is not set @@ -710,6 +832,7 @@ CONFIG_FS_POSIX_ACL=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y @@ -758,6 +881,7 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set CONFIG_CRAMFS=y # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set @@ -772,7 +896,6 @@ CONFIG_ROMFS_BACKED_BY_BLOCK=y CONFIG_ROMFS_ON_BLOCK=y # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set # # Partition Types @@ -789,6 +912,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set @@ -826,11 +950,13 @@ CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set CONFIG_SYSCTL_SYSCALL_CHECK=y @@ -839,6 +965,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -849,6 +976,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -857,8 +985,9 @@ CONFIG_DEBUG_USER=y CONFIG_DEBUG_ERRORS=y # CONFIG_DEBUG_STACK_USAGE is not set CONFIG_DEBUG_LL=y +# CONFIG_EARLY_PRINTK is not set # CONFIG_DEBUG_ICEDCC is not set -CONFIG_DEBUG_S3C_PORT=y +# CONFIG_OC_ETM is not set CONFIG_DEBUG_S3C_UART=0 # @@ -867,7 +996,11 @@ CONFIG_DEBUG_S3C_UART=0 # CONFIG_KEYS is not set # CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# 
CONFIG_DEFAULT_SECURITY_SMACK is not set +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_DEFAULT_SECURITY="" # CONFIG_CRYPTO is not set # CONFIG_BINARY_PRINTF is not set @@ -884,8 +1017,10 @@ CONFIG_CRC32=y # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y +CONFIG_LZO_DECOMPRESS=y CONFIG_DECOMPRESS_GZIP=y CONFIG_DECOMPRESS_BZIP2=y CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZO=y CONFIG_HAS_IOMEM=y CONFIG_HAS_DMA=y diff --git a/arch/arm/configs/s5pc110_defconfig b/arch/arm/configs/s5pc110_defconfig index 796cb78498c3..c4de360b0f69 100644 --- a/arch/arm/configs/s5pc110_defconfig +++ b/arch/arm/configs/s5pc110_defconfig @@ -1,11 +1,13 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:18:21 2010 +# Wed May 26 19:04:37 2010 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -180,9 +183,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -218,7 +223,7 @@ CONFIG_MMU=y # CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_S5P6440 is not set # CONFIG_ARCH_S5P6442 is not set -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set CONFIG_ARCH_S5PV210=y # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -227,6 +232,7 @@ CONFIG_ARCH_S5PV210=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -242,16 +248,22 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y CONFIG_S3C_GPIO_CFG_S3C24XX=y CONFIG_S3C_GPIO_CFG_S3C64XX=y CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S5P_GPIO_DRVSTR=y CONFIG_SAMSUNG_GPIO_EXTRA=0 CONFIG_S3C_GPIO_SPACE=0 CONFIG_S3C_GPIO_TRACK=y # CONFIG_S3C_ADC is not set +CONFIG_S3C_DEV_WDT=y +CONFIG_S3C_PL330_DMA=y # # Power management # CONFIG_PLAT_S5P=y +CONFIG_S5P_EXT_INT=y CONFIG_CPU_S5PV210=y +# CONFIG_MACH_AQUILA is not set +# CONFIG_MACH_GONI is not set # CONFIG_MACH_SMDKV210 is not set CONFIG_MACH_SMDKC110=y @@ -281,12 +293,14 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_HAS_TLS_REG=y CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_430973 is not set # CONFIG_ARM_ERRATA_458693 is not set # CONFIG_ARM_ERRATA_460075 is not set CONFIG_ARM_VIC=y CONFIG_ARM_VIC_NR=2 +CONFIG_PL330=y # # Bus support @@ -335,6 +349,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -481,6 +496,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_FUJITSU 
is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set @@ -536,6 +552,8 @@ CONFIG_SERIAL_S5PV210=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -583,6 +601,7 @@ CONFIG_GPIOLIB=y # CONFIG_HWMON is not set # CONFIG_THERMAL is not set # CONFIG_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y CONFIG_SSB_POSSIBLE=y # @@ -635,10 +654,6 @@ CONFIG_RTC_LIB=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -847,6 +862,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -857,6 +874,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig index 6831dab97d96..e2f5bce29828 100644 --- a/arch/arm/configs/s5pv210_defconfig +++ b/arch/arm/configs/s5pv210_defconfig @@ -1,11 +1,13 @@ # # Automatically generated make config: don't edit # Linux kernel version: 2.6.34 -# Sat May 22 03:18:22 2010 +# Wed May 26 19:04:39 2010 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_ARCH_USES_GETTIMEOFFSET=y CONFIG_HAVE_PROC_CPU=y CONFIG_NO_IOPORT=y CONFIG_GENERIC_HARDIRQS=y @@ -35,6 +37,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_KERNEL_GZIP=y # CONFIG_KERNEL_BZIP2 is not set @@ -180,9 +183,11 @@ CONFIG_MMU=y # CONFIG_ARCH_INTEGRATOR is not set # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_VEXPRESS is not set # CONFIG_ARCH_AT91 is not set # CONFIG_ARCH_BCMRING is not set # CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CNS3XXX is not set # CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set @@ -218,7 +223,7 @@ CONFIG_MMU=y # CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_S5P6440 is not set # CONFIG_ARCH_S5P6442 is not set -# CONFIG_ARCH_S5PC1XX is not set +# CONFIG_ARCH_S5PC100 is not set CONFIG_ARCH_S5PV210=y # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set @@ -227,6 +232,7 @@ CONFIG_ARCH_S5PV210=y # CONFIG_ARCH_NOMADIK is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set +# CONFIG_PLAT_SPEAR is not set CONFIG_PLAT_SAMSUNG=y # @@ -242,16 +248,24 @@ CONFIG_SAMSUNG_GPIOLIB_4BIT=y CONFIG_S3C_GPIO_CFG_S3C24XX=y CONFIG_S3C_GPIO_CFG_S3C64XX=y CONFIG_S3C_GPIO_PULL_UPDOWN=y +CONFIG_S5P_GPIO_DRVSTR=y CONFIG_SAMSUNG_GPIO_EXTRA=0 CONFIG_S3C_GPIO_SPACE=0 CONFIG_S3C_GPIO_TRACK=y # CONFIG_S3C_ADC is not set +CONFIG_S3C_DEV_WDT=y +CONFIG_SAMSUNG_DEV_ADC=y +CONFIG_SAMSUNG_DEV_TS=y +CONFIG_S3C_PL330_DMA=y # # Power management # CONFIG_PLAT_S5P=y +CONFIG_S5P_EXT_INT=y CONFIG_CPU_S5PV210=y +# CONFIG_MACH_AQUILA is not set +# CONFIG_MACH_GONI is not set CONFIG_MACH_SMDKV210=y # CONFIG_MACH_SMDKC110 is 
not set @@ -281,12 +295,14 @@ CONFIG_ARM_THUMB=y # CONFIG_CPU_BPREDICT_DISABLE is not set CONFIG_HAS_TLS_REG=y CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y CONFIG_CPU_HAS_PMU=y # CONFIG_ARM_ERRATA_430973 is not set # CONFIG_ARM_ERRATA_458693 is not set # CONFIG_ARM_ERRATA_460075 is not set CONFIG_ARM_VIC=y CONFIG_ARM_VIC_NR=2 +CONFIG_PL330=y # # Bus support @@ -335,6 +351,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0 CONFIG_ZBOOT_ROM_BSS=0 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" +# CONFIG_CMDLINE_FORCE is not set # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -481,7 +498,9 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_S3C2410 is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set # CONFIG_TOUCHSCREEN_WACOM_W8001 is not set @@ -536,6 +555,8 @@ CONFIG_SERIAL_S5PV210=y CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_TIMBERDALE is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y @@ -583,6 +604,7 @@ CONFIG_GPIOLIB=y # CONFIG_HWMON is not set # CONFIG_THERMAL is not set # CONFIG_WATCHDOG is not set +CONFIG_HAVE_S3C2410_WATCHDOG=y CONFIG_SSB_POSSIBLE=y # @@ -635,10 +657,6 @@ CONFIG_RTC_LIB=y # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set - -# -# TI VLYNQ -# # CONFIG_STAGING is not set # @@ -847,6 +865,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_TRACING_SUPPORT=y CONFIG_FTRACE=y # CONFIG_FUNCTION_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set # CONFIG_SCHED_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set # CONFIG_BOOT_TRACER is not set @@ -857,6 +877,7 @@ CONFIG_BRANCH_PROFILE_NONE=y # CONFIG_KMEMTRACE is not set # CONFIG_WORKQUEUE_TRACER is not set # CONFIG_BLK_DEV_IO_TRACE is not set +# CONFIG_ATOMIC64_SELFTEST is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index bcda59f39941..2f87870d9347 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -3,9 +3,6 @@ #include #include - #include -#undef ARCH_HAS_SG_CHAIN - #endif /* _ASMARM_SCATTERLIST_H */ diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index abd04932917b..2ec3095ffb7b 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -24,6 +25,8 @@ #include #include #include +#include +#include #include #include @@ -533,10 +536,24 @@ struct regulator_init_data tps65070_regulator_data[] = { }, }; +static struct touchscreen_init_data tps6507x_touchscreen_data = { + .poll_period = 30, /* ms between touch samples */ + .min_pressure = 0x30, /* minimum pressure to trigger touch */ + .vref = 0, /* turn off vref when not using A/D */ + .vendor = 0, /* /sys/class/input/input?/id/vendor */ + .product = 65070, /* /sys/class/input/input?/id/product */ + .version = 0x100, /* /sys/class/input/input?/id/version */ +}; + +static struct tps6507x_board tps_board = { + 
.tps6507x_pmic_init_data = &tps65070_regulator_data[0], + .tps6507x_ts_init_data = &tps6507x_touchscreen_data, +}; + static struct i2c_board_info __initdata da850evm_tps65070_info[] = { { I2C_BOARD_INFO("tps6507x", 0x48), - .platform_data = &tps65070_regulator_data[0], + .platform_data = &tps_board, }, }; diff --git a/arch/arm/mach-davinci/include/mach/mmc.h b/arch/arm/mach-davinci/include/mach/mmc.h index 5a85e24f3673..d4f1e9675069 100644 --- a/arch/arm/mach-davinci/include/mach/mmc.h +++ b/arch/arm/mach-davinci/include/mach/mmc.h @@ -22,6 +22,9 @@ struct davinci_mmc_config { /* Version of the MMC/SD controller */ u8 version; + + /* Number of sg segments */ + u8 nr_sg; }; void davinci_setup_mmc(int module, struct davinci_mmc_config *config); diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c index 33a8d35498a7..62b5e40165df 100644 --- a/arch/arm/mach-mx3/mach-mx31moboard.c +++ b/arch/arm/mach-mx3/mach-mx31moboard.c @@ -220,11 +220,54 @@ static struct mc13783_regulator_init_data moboard_regulators[] = { }, }; +static struct mc13783_led_platform_data moboard_led[] = { + { + .id = MC13783_LED_R1, + .name = "coreboard-led-4:red", + .max_current = 2, + }, + { + .id = MC13783_LED_G1, + .name = "coreboard-led-4:green", + .max_current = 2, + }, + { + .id = MC13783_LED_B1, + .name = "coreboard-led-4:blue", + .max_current = 2, + }, + { + .id = MC13783_LED_R2, + .name = "coreboard-led-5:red", + .max_current = 3, + }, + { + .id = MC13783_LED_G2, + .name = "coreboard-led-5:green", + .max_current = 3, + }, + { + .id = MC13783_LED_B2, + .name = "coreboard-led-5:blue", + .max_current = 3, + }, +}; + +static struct mc13783_leds_platform_data moboard_leds = { + .num_leds = ARRAY_SIZE(moboard_led), + .led = moboard_led, + .flags = MC13783_LED_SLEWLIMTC, + .abmode = MC13783_LED_AB_DISABLED, + .tc1_period = MC13783_LED_PERIOD_10MS, + .tc2_period = MC13783_LED_PERIOD_10MS, +}; + static struct mc13783_platform_data moboard_pmic = { .regulators = moboard_regulators, .num_regulators = ARRAY_SIZE(moboard_regulators), + .leds = &moboard_leds, .flags = MC13783_USE_REGULATOR | MC13783_USE_RTC | - MC13783_USE_ADC, + MC13783_USE_ADC | MC13783_USE_LED, }; static struct spi_board_info moboard_spi_board_info[] __initdata = { diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index e7d629b3c76a..f474a80b8867 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -137,9 +137,7 @@ static void ads7846_dev_init(void) } gpio_direction_input(ts_gpio); - - omap_set_gpio_debounce(ts_gpio, 1); - omap_set_gpio_debounce_time(ts_gpio, 0xa); + gpio_set_debounce(ts_gpio, 310); } static int ads7846_get_pendown_state(void) diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 5fcb52e71298..fefd7e6e9779 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -209,8 +209,7 @@ static void ads7846_dev_init(void) } gpio_direction_input(ts_gpio); - omap_set_gpio_debounce(ts_gpio, 1); - omap_set_gpio_debounce_time(ts_gpio, 0xa); + gpio_set_debounce(ts_gpio, 310); } static int ads7846_get_pendown_state(void) diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 81bba194b030..b95261013812 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -579,9 +579,7 @@ static void ads7846_dev_init(void) printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); gpio_direction_input(OMAP3_EVM_TS_GPIO); - - 
omap_set_gpio_debounce(OMAP3_EVM_TS_GPIO, 1); - omap_set_gpio_debounce_time(OMAP3_EVM_TS_GPIO, 0xa); + gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310); } static int ads7846_get_pendown_state(void) diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 395d049bf010..db06dc910ba7 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -130,8 +130,8 @@ static struct platform_device pandora_keys_gpio = { static void __init pandora_keys_gpio_init(void) { /* set debounce time for GPIO banks 4 and 6 */ - omap_set_gpio_debounce_time(32 * 3, GPIO_DEBOUNCE_TIME); - omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME); + gpio_set_debounce(32 * 3, GPIO_DEBOUNCE_TIME); + gpio_set_debounce(32 * 5, GPIO_DEBOUNCE_TIME); } static int board_keymap[] = { diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 2504d41f923e..2f5f8233dd5b 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -328,8 +328,7 @@ static void __init omap3_ads7846_init(void) } gpio_direction_input(OMAP3_TS_GPIO); - omap_set_gpio_debounce(OMAP3_TS_GPIO, 1); - omap_set_gpio_debounce_time(OMAP3_TS_GPIO, 0xa); + gpio_set_debounce(OMAP3_TS_GPIO, 310); } static struct ads7846_platform_data ads7846_config = { diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c index 685f34a9634b..fe0de1698edc 100644 --- a/arch/arm/mach-orion5x/dns323-setup.c +++ b/arch/arm/mach-orion5x/dns323-setup.c @@ -240,22 +240,23 @@ error_fail: #define ORION_BLINK_HALF_PERIOD 100 /* ms */ -static int dns323_gpio_blink_set(unsigned gpio, +static int dns323_gpio_blink_set(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off) { - static int value = 0; - if (!*delay_on && !*delay_off) + if (delay_on && delay_off && !*delay_on && !*delay_off) *delay_on = *delay_off = ORION_BLINK_HALF_PERIOD; - if (ORION_BLINK_HALF_PERIOD == *delay_on - && ORION_BLINK_HALF_PERIOD == *delay_off) { - value = !value; - orion_gpio_set_blink(gpio, value); - return 0; + switch(state) { + case GPIO_LED_NO_BLINK_LOW: + case GPIO_LED_NO_BLINK_HIGH: + orion_gpio_set_blink(gpio, 0); + gpio_set_value(gpio, state); + break; + case GPIO_LED_BLINK: + orion_gpio_set_blink(gpio, 1); } - - return -EINVAL; + return 0; } static struct gpio_led dns323_leds[] = { @@ -263,6 +264,7 @@ static struct gpio_led dns323_leds[] = { .name = "power:blue", .gpio = DNS323_GPIO_LED_POWER2, .default_trigger = "timer", + .active_low = 1, }, { .name = "right:amber", .gpio = DNS323_GPIO_LED_RIGHT_AMBER, diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 45799c608d8f..9e39faa283b9 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c @@ -49,7 +49,6 @@ #include #include -#include #include #include @@ -57,6 +56,7 @@ #include #include #include +#include #include #include @@ -254,6 +254,12 @@ static char *gta02_batteries[] = { "battery", }; +static struct pcf50633_bl_platform_data gta02_backlight_data = { + .default_brightness = 0x3f, + .default_brightness_limit = 0, + .ramp_time = 5, +}; + struct pcf50633_platform_data gta02_pcf_pdata = { .resumers = { [0] = PCF50633_INT1_USBINS | @@ -271,6 +277,8 @@ struct pcf50633_platform_data gta02_pcf_pdata = { .charger_reference_current_ma = 1000, + .backlight_data = >a02_backlight_data, + .reg_init_data = { [PCF50633_REGULATOR_AUTO] = { .constraints = { @@ -478,71 
+486,6 @@ static struct s3c2410_udc_mach_info gta02_udc_cfg = { }; - - -static void gta02_bl_set_intensity(int intensity) -{ - struct pcf50633 *pcf = gta02_pcf; - int old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT); - - /* We map 8-bit intensity to 6-bit intensity in hardware. */ - intensity >>= 2; - - /* - * This can happen during, eg, print of panic on blanked console, - * but we can't service i2c without interrupts active, so abort. - */ - if (in_atomic()) { - printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n"); - return; - } - - old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT); - if (intensity == old_intensity) - return; - - /* We can't do this anywhere else. */ - pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5); - - if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3)) - old_intensity = 0; - - /* - * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60) - * if seen, you have to re-enable the LED unit. - */ - if (!intensity || !old_intensity) - pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0); - - /* Illegal to set LEDOUT to 0. */ - if (!intensity) - pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2); - else - pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, - intensity); - - if (intensity) - pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2); - -} - -static struct generic_bl_info gta02_bl_info = { - .name = "gta02-bl", - .max_intensity = 0xff, - .default_intensity = 0xff, - .set_bl_intensity = gta02_bl_set_intensity, -}; - -static struct platform_device gta02_bl_dev = { - .name = "generic-bl", - .id = 1, - .dev = { - .platform_data = >a02_bl_info, - }, -}; - - - /* USB */ static struct s3c2410_hcd_info gta02_usb_info __initdata = { .port[0] = { @@ -579,7 +522,6 @@ static struct platform_device *gta02_devices[] __initdata = { /* These guys DO need to be children of PMU. */ static struct platform_device *gta02_devices_pmu_children[] = { - >a02_bl_dev, }; diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c index 7a4138beb665..fbd85a9b7bbf 100644 --- a/arch/arm/mach-s3c64xx/clock.c +++ b/arch/arm/mach-s3c64xx/clock.c @@ -258,6 +258,12 @@ static struct clk init_clocks[] = { .parent = &clk_h, .enable = s3c64xx_hclk_ctrl, .ctrlbit = S3C_CLKCON_HCLK_HSMMC2, + }, { + .name = "otg", + .id = -1, + .parent = &clk_h, + .enable = s3c64xx_hclk_ctrl, + .ctrlbit = S3C_CLKCON_HCLK_USB, }, { .name = "timers", .id = -1, diff --git a/arch/arm/mach-s5p6440/include/mach/irqs.h b/arch/arm/mach-s5p6440/include/mach/irqs.h index a4b9b40d18f2..911854d9ad42 100644 --- a/arch/arm/mach-s5p6440/include/mach/irqs.h +++ b/arch/arm/mach-s5p6440/include/mach/irqs.h @@ -72,7 +72,14 @@ #define S5P_IRQ_EINT_BASE (S5P_IRQ_VIC1(31) + 6) #define S5P_EINT(x) ((x) + S5P_IRQ_EINT_BASE) -#define IRQ_EINT(x) S5P_EINT(x) + +#define S5P_EINT_BASE1 (S5P_IRQ_EINT_BASE) +/* + * S5P6440 has 0-15 external interrupts in group 0. Only these can be used + * to wake up from sleep. If request is beyond this range, by mistake, a large + * return value for an irq number should be indication of something amiss. + */ +#define S5P_EINT_BASE2 (0xf0000000) /* * Next the external interrupt groups. These are similar to the IRQ_EINT(x) diff --git a/arch/arm/mach-s5p6442/include/mach/irqs.h b/arch/arm/mach-s5p6442/include/mach/irqs.h index da665809f6e4..02c23749c023 100644 --- a/arch/arm/mach-s5p6442/include/mach/irqs.h +++ b/arch/arm/mach-s5p6442/include/mach/irqs.h @@ -77,8 +77,9 @@ #define S5P_IRQ_EINT_BASE (IRQ_VIC_END + 1) -#define IRQ_EINT(x) ((x) < 16 ? 
S5P_IRQ_VIC0(x) : \ - (S5P_IRQ_EINT_BASE + (x)-16)) +#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0)) +#define S5P_EINT_BASE2 (S5P_IRQ_EINT_BASE) + /* Set the default NR_IRQS */ #define NR_IRQS (IRQ_EINT(31) + 1) diff --git a/arch/arm/mach-s5pc100/include/mach/irqs.h b/arch/arm/mach-s5pc100/include/mach/irqs.h index 15066df3ced9..28aa551dc3a8 100644 --- a/arch/arm/mach-s5pc100/include/mach/irqs.h +++ b/arch/arm/mach-s5pc100/include/mach/irqs.h @@ -100,9 +100,6 @@ #define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0)) #define S5P_EINT_BASE2 (IRQ_VIC_END + 1) -#define IRQ_EINT(x) ((x) < 16 ? S5P_IRQ_VIC0(x) : \ - (S5P_EINT_BASE2 + (x) - 16)) - #define S3C_IRQ_GPIO_BASE (IRQ_EINT(31) + 1) #define S3C_IRQ_GPIO(x) (S3C_IRQ_GPIO_BASE + (x)) diff --git a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h index 763edebdd577..dd6295e1251d 100644 --- a/arch/arm/mach-s5pc100/include/mach/regs-gpio.h +++ b/arch/arm/mach-s5pc100/include/mach/regs-gpio.h @@ -60,12 +60,9 @@ #define S5PC100EINT30PEND (S5P_VA_GPIO + 0xF40) #define S5P_EINT_PEND(x) (S5PC100EINT30PEND + ((x) * 0x4)) -#define eint_offset(irq) ((irq) < IRQ_EINT16_31 ? ((irq) - IRQ_EINT(0)) : \ - (((irq) - S5P_EINT_BASE2))) +#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3) -#define EINT_REG_NR(x) (eint_offset(x) >> 3) - -#define eint_irq_to_bit(irq) (1 << (eint_offset(irq) & 0x7)) +#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7)) /* values for S5P_EXTINT0 */ #define S5P_EXTINT_LOWLEV (0x00) diff --git a/arch/arm/mach-s5pv210/include/mach/irqs.h b/arch/arm/mach-s5pv210/include/mach/irqs.h index 92fc6c7fc064..96895378ea27 100644 --- a/arch/arm/mach-s5pv210/include/mach/irqs.h +++ b/arch/arm/mach-s5pv210/include/mach/irqs.h @@ -118,22 +118,12 @@ #define IRQ_MDNIE3 S5P_IRQ_VIC3(8) #define IRQ_VIC_END S5P_IRQ_VIC3(31) -#define S5P_EINT_16_31_BASE (IRQ_VIC_END + 1) - -#define EINT_MODE S3C_GPIO_SFN(0xf) - -#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_IRQ_VIC0(0)) \ - : ((x) + S5P_EINT_16_31_BASE)) +#define S5P_EINT_BASE1 (S5P_IRQ_VIC0(0)) +#define S5P_EINT_BASE2 (IRQ_VIC_END + 1) /* Set the default NR_IRQS */ - #define NR_IRQS (IRQ_EINT(31) + 1) -#define EINT_GPIO_0(x) S5PV210_GPH0(x) -#define EINT_GPIO_1(x) S5PV210_GPH1(x) -#define EINT_GPIO_2(x) S5PV210_GPH2(x) -#define EINT_GPIO_3(x) S5PV210_GPH3(x) - /* Compatibility */ #define IRQ_LCD_FIFO IRQ_LCD0 #define IRQ_LCD_VSYNC IRQ_LCD1 diff --git a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h index 6d068091c36c..49e029b4978a 100644 --- a/arch/arm/mach-s5pv210/include/mach/regs-gpio.h +++ b/arch/arm/mach-s5pv210/include/mach/regs-gpio.h @@ -27,12 +27,9 @@ #define S5PV210_EINT30PEND (S5P_VA_GPIO + 0xF40) #define S5P_EINT_PEND(x) (S5PV210_EINT30PEND + ((x) * 0x4)) -#define eint_offset(irq) ((irq) < IRQ_EINT16_31 ? 
((irq) - IRQ_EINT(0)) \ - : ((irq) - S5P_EINT_16_31_BASE)) +#define EINT_REG_NR(x) (EINT_OFFSET(x) >> 3) -#define EINT_REG_NR(x) (eint_offset(x) >> 3) - -#define eint_irq_to_bit(irq) (1 << (eint_offset(irq) & 0x7)) +#define eint_irq_to_bit(irq) (1 << (EINT_OFFSET(irq) & 0x7)) /* values for S5P_EXTINT0 */ #define S5P_EXTINT_LOWLEV (0x00) @@ -41,4 +38,11 @@ #define S5P_EXTINT_RISEEDGE (0x03) #define S5P_EXTINT_BOTHEDGE (0x04) +#define EINT_MODE S3C_GPIO_SFN(0xf) + +#define EINT_GPIO_0(x) S5PV210_GPH0(x) +#define EINT_GPIO_1(x) S5PV210_GPH1(x) +#define EINT_GPIO_2(x) S5PV210_GPH2(x) +#define EINT_GPIO_3(x) S5PV210_GPH3(x) + #endif /* __ASM_ARCH_REGS_GPIO_H */ diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c index c73ed06b6065..f0394baa11fa 100644 --- a/arch/arm/mach-u300/i2c.c +++ b/arch/arm/mach-u300/i2c.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include @@ -46,6 +46,7 @@ /* BUCK SLEEP 0xAC: 1.05V, Not used, SLEEP_A and B, Not used */ #define BUCK_SLEEP_SETTING 0xAC +#ifdef CONFIG_AB3100_CORE static struct regulator_consumer_supply supply_ldo_c[] = { { .dev_name = "ab3100-codec", @@ -253,14 +254,68 @@ static struct ab3100_platform_data ab3100_plf_data = { LDO_D_SETTING, }, }; +#endif + +#ifdef CONFIG_AB3550_CORE +static struct abx500_init_settings ab3550_init_settings[] = { + { + .bank = 0, + .reg = AB3550_IMR1, + .setting = 0xff + }, + { + .bank = 0, + .reg = AB3550_IMR2, + .setting = 0xff + }, + { + .bank = 0, + .reg = AB3550_IMR3, + .setting = 0xff + }, + { + .bank = 0, + .reg = AB3550_IMR4, + .setting = 0xff + }, + { + .bank = 0, + .reg = AB3550_IMR5, + /* The two most significant bits are not used */ + .setting = 0x3f + }, +}; + +static struct ab3550_platform_data ab3550_plf_data = { + .irq = { + .base = IRQ_AB3550_BASE, + .count = (IRQ_AB3550_END - IRQ_AB3550_BASE + 1), + }, + .dev_data = { + }, + .init_settings = ab3550_init_settings, + .init_settings_sz = ARRAY_SIZE(ab3550_init_settings), +}; +#endif static struct i2c_board_info __initdata bus0_i2c_board_info[] = { +#if defined(CONFIG_AB3550_CORE) + { + .type = "ab3550", + .addr = 0x4A, + .irq = IRQ_U300_IRQ0_EXT, + .platform_data = &ab3550_plf_data, + }, +#elif defined(CONFIG_AB3100_CORE) { .type = "ab3100", .addr = 0x48, .irq = IRQ_U300_IRQ0_EXT, .platform_data = &ab3100_plf_data, }, +#else + { }, +#endif }; static struct i2c_board_info __initdata bus1_i2c_board_info[] = { diff --git a/arch/arm/mach-u300/include/mach/irqs.h b/arch/arm/mach-u300/include/mach/irqs.h index a6867b12773e..09b1b28fa8fd 100644 --- a/arch/arm/mach-u300/include/mach/irqs.h +++ b/arch/arm/mach-u300/include/mach/irqs.h @@ -109,6 +109,13 @@ #define U300_NR_IRQS 48 #endif +#ifdef CONFIG_AB3550_CORE +#define IRQ_AB3550_BASE (U300_NR_IRQS) +#define IRQ_AB3550_END (IRQ_AB3550_BASE + 37) + +#define NR_IRQS (IRQ_AB3550_END + 1) +#else #define NR_IRQS U300_NR_IRQS +#endif #endif diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index 072196c57263..bb8d7b771817 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -50,7 +50,7 @@ struct pl022_config_chip ab4500_chip_info = { static struct spi_board_info u8500_spi_devices[] = { { - .modalias = "ab4500", + .modalias = "ab8500", .controller_data = &ab4500_chip_info, .max_speed_hz = 12000000, .bus_num = 0, diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c index 1b2c9890e8b4..6544855af2f1 100644 --- a/arch/arm/mach-ux500/clock.c +++ b/arch/arm/mach-ux500/clock.c @@ -411,7 +411,7 @@ 
static struct clk_lookup u8500_common_clks[] = { CLK(apetraceclk, "apetrace", NULL), CLK(mcdeclk, "mcde", NULL), CLK(ipi2clk, "ipi2", NULL), - CLK(dmaclk, "dma40", NULL), + CLK(dmaclk, "dma40.0", NULL), CLK(b2r2clk, "b2r2", NULL), CLK(tvclk, "tv", NULL), }; diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index d04299f3b6b5..f21c444edd99 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -32,6 +32,7 @@ static struct platform_device *platform_devs[] __initdata = { &u8500_gpio_devs[6], &u8500_gpio_devs[7], &u8500_gpio_devs[8], + &u8500_dma40_device, }; /* minimum static i/o mapping required to boot U8500 platforms */ @@ -71,6 +72,9 @@ void __init u8500_init_devices(void) { ux500_init_devices(); + if (cpu_is_u8500ed()) + dma40_u8500ed_fixup(); + /* Register the platform devices */ platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c index 20334236afce..822903421943 100644 --- a/arch/arm/mach-ux500/devices-db8500.c +++ b/arch/arm/mach-ux500/devices-db8500.c @@ -12,9 +12,13 @@ #include #include +#include + #include #include +#include "ste-dma40-db8500.h" + static struct nmk_gpio_platform_data u8500_gpio_data[] = { GPIO_DATA("GPIO-0-31", 0), GPIO_DATA("GPIO-32-63", 32), /* 37..63 not routed to pin */ @@ -105,3 +109,108 @@ struct platform_device u8500_i2c4_device = { .resource = u8500_i2c4_resources, .num_resources = ARRAY_SIZE(u8500_i2c4_resources), }; + +static struct resource dma40_resources[] = { + [0] = { + .start = U8500_DMA_BASE, + .end = U8500_DMA_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + .name = "base", + }, + [1] = { + .start = U8500_DMA_LCPA_BASE, + .end = U8500_DMA_LCPA_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + .name = "lcpa", + }, + [2] = { + .start = U8500_DMA_LCLA_BASE, + .end = U8500_DMA_LCLA_BASE + 16 * 1024 - 1, + .flags = IORESOURCE_MEM, + .name = "lcla", + }, + [3] = { + .start = IRQ_DMA, + .end = IRQ_DMA, + .flags = IORESOURCE_IRQ} +}; + +/* Default configuration for physcial memcpy */ +struct stedma40_chan_cfg dma40_memcpy_conf_phy = { + .channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE | + STEDMA40_LOW_PRIORITY_CHANNEL | + STEDMA40_PCHAN_BASIC_MODE), + .dir = STEDMA40_MEM_TO_MEM, + + .src_info.endianess = STEDMA40_LITTLE_ENDIAN, + .src_info.data_width = STEDMA40_BYTE_WIDTH, + .src_info.psize = STEDMA40_PSIZE_PHY_1, + + .dst_info.endianess = STEDMA40_LITTLE_ENDIAN, + .dst_info.data_width = STEDMA40_BYTE_WIDTH, + .dst_info.psize = STEDMA40_PSIZE_PHY_1, + +}; +/* Default configuration for logical memcpy */ +struct stedma40_chan_cfg dma40_memcpy_conf_log = { + .channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE | + STEDMA40_LOW_PRIORITY_CHANNEL | + STEDMA40_LCHAN_SRC_LOG_DST_LOG | + STEDMA40_NO_TIM_FOR_LINK), + .dir = STEDMA40_MEM_TO_MEM, + + .src_info.endianess = STEDMA40_LITTLE_ENDIAN, + .src_info.data_width = STEDMA40_BYTE_WIDTH, + .src_info.psize = STEDMA40_PSIZE_LOG_1, + + .dst_info.endianess = STEDMA40_LITTLE_ENDIAN, + .dst_info.data_width = STEDMA40_BYTE_WIDTH, + .dst_info.psize = STEDMA40_PSIZE_LOG_1, + +}; + +/* + * Mapping between destination event lines and physical device address. + * The event line is tied to a device and therefor the address is constant. 
+ */ +static const dma_addr_t dma40_tx_map[STEDMA40_NR_DEV]; + +/* Mapping between source event lines and physical device address */ +static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV]; + +/* Reserved event lines for memcpy only */ +static int dma40_memcpy_event[] = { + STEDMA40_MEMCPY_TX_1, + STEDMA40_MEMCPY_TX_2, + STEDMA40_MEMCPY_TX_3, + STEDMA40_MEMCPY_TX_4, +}; + +static struct stedma40_platform_data dma40_plat_data = { + .dev_len = STEDMA40_NR_DEV, + .dev_rx = dma40_rx_map, + .dev_tx = dma40_tx_map, + .memcpy = dma40_memcpy_event, + .memcpy_len = ARRAY_SIZE(dma40_memcpy_event), + .memcpy_conf_phy = &dma40_memcpy_conf_phy, + .memcpy_conf_log = &dma40_memcpy_conf_log, + .llis_per_log = 8, +}; + +struct platform_device u8500_dma40_device = { + .dev = { + .platform_data = &dma40_plat_data, + }, + .name = "dma40", + .id = 0, + .num_resources = ARRAY_SIZE(dma40_resources), + .resource = dma40_resources +}; + +void dma40_u8500ed_fixup(void) +{ + dma40_plat_data.memcpy = NULL; + dma40_plat_data.memcpy_len = 0; + dma40_resources[0].start = U8500_DMA_BASE_ED; + dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1; +} diff --git a/arch/arm/mach-ux500/include/mach/db8500-regs.h b/arch/arm/mach-ux500/include/mach/db8500-regs.h index 9169e1e382a3..85fc6a80b386 100644 --- a/arch/arm/mach-ux500/include/mach/db8500-regs.h +++ b/arch/arm/mach-ux500/include/mach/db8500-regs.h @@ -7,6 +7,18 @@ #ifndef __MACH_DB8500_REGS_H #define __MACH_DB8500_REGS_H +/* Base address and bank offsets for ESRAM */ +#define U8500_ESRAM_BASE 0x40000000 +#define U8500_ESRAM_BANK_SIZE 0x00020000 +#define U8500_ESRAM_BANK0 U8500_ESRAM_BASE +#define U8500_ESRAM_BANK1 (U8500_ESRAM_BASE + U8500_ESRAM_BANK_SIZE) +#define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE) +#define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE) +#define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE) +/* Use bank 4 for DMA LCLA and LCPA */ +#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4 +#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK4 + 0x4000) + #define U8500_PER3_BASE 0x80000000 #define U8500_STM_BASE 0x80100000 #define U8500_STM_REG_BASE (U8500_STM_BASE + 0xF000) diff --git a/arch/arm/mach-ux500/include/mach/devices.h b/arch/arm/mach-ux500/include/mach/devices.h index 0422af00a56e..c2b2f2574947 100644 --- a/arch/arm/mach-ux500/include/mach/devices.h +++ b/arch/arm/mach-ux500/include/mach/devices.h @@ -25,5 +25,8 @@ extern struct platform_device ux500_i2c3_device; extern struct platform_device u8500_i2c0_device; extern struct platform_device u8500_i2c4_device; +extern struct platform_device u8500_dma40_device; + +void dma40_u8500ed_fixup(void); #endif diff --git a/arch/arm/mach-ux500/ste-dma40-db8500.h b/arch/arm/mach-ux500/ste-dma40-db8500.h new file mode 100644 index 000000000000..e7016278dfa9 --- /dev/null +++ b/arch/arm/mach-ux500/ste-dma40-db8500.h @@ -0,0 +1,154 @@ +/* + * arch/arm/mach-ux500/ste_dma40_db8500.h + * DB8500-SoC-specific configuration for DMA40 + * + * Copyright (C) ST-Ericsson 2007-2010 + * License terms: GNU General Public License (GPL) version 2 + * Author: Per Friden + * Author: Jonas Aaberg + */ +#ifndef STE_DMA40_DB8500_H +#define STE_DMA40_DB8500_H + +#define STEDMA40_NR_DEV 64 + +enum dma_src_dev_type { + STEDMA40_DEV_SPI0_RX = 0, + STEDMA40_DEV_SD_MMC0_RX = 1, + STEDMA40_DEV_SD_MMC1_RX = 2, + STEDMA40_DEV_SD_MMC2_RX = 3, + STEDMA40_DEV_I2C1_RX = 4, + STEDMA40_DEV_I2C3_RX = 5, + STEDMA40_DEV_I2C2_RX = 6, + STEDMA40_DEV_I2C4_RX = 7, /* Only on V1 */ + STEDMA40_DEV_SSP0_RX = 8, 
+ STEDMA40_DEV_SSP1_RX = 9, + STEDMA40_DEV_MCDE_RX = 10, + STEDMA40_DEV_UART2_RX = 11, + STEDMA40_DEV_UART1_RX = 12, + STEDMA40_DEV_UART0_RX = 13, + STEDMA40_DEV_MSP2_RX = 14, + STEDMA40_DEV_I2C0_RX = 15, + STEDMA40_DEV_USB_OTG_IEP_8 = 16, + STEDMA40_DEV_USB_OTG_IEP_1_9 = 17, + STEDMA40_DEV_USB_OTG_IEP_2_10 = 18, + STEDMA40_DEV_USB_OTG_IEP_3_11 = 19, + STEDMA40_DEV_SLIM0_CH0_RX_HSI_RX_CH0 = 20, + STEDMA40_DEV_SLIM0_CH1_RX_HSI_RX_CH1 = 21, + STEDMA40_DEV_SLIM0_CH2_RX_HSI_RX_CH2 = 22, + STEDMA40_DEV_SLIM0_CH3_RX_HSI_RX_CH3 = 23, + STEDMA40_DEV_SRC_SXA0_RX_TX = 24, + STEDMA40_DEV_SRC_SXA1_RX_TX = 25, + STEDMA40_DEV_SRC_SXA2_RX_TX = 26, + STEDMA40_DEV_SRC_SXA3_RX_TX = 27, + STEDMA40_DEV_SD_MM2_RX = 28, + STEDMA40_DEV_SD_MM0_RX = 29, + STEDMA40_DEV_MSP1_RX = 30, + /* + * This channel is either SlimBus or MSP, + * never both at the same time. + */ + STEDMA40_SLIM0_CH0_RX = 31, + STEDMA40_DEV_MSP0_RX = 31, + STEDMA40_DEV_SD_MM1_RX = 32, + STEDMA40_DEV_SPI2_RX = 33, + STEDMA40_DEV_I2C3_RX2 = 34, + STEDMA40_DEV_SPI1_RX = 35, + STEDMA40_DEV_USB_OTG_IEP_4_12 = 36, + STEDMA40_DEV_USB_OTG_IEP_5_13 = 37, + STEDMA40_DEV_USB_OTG_IEP_6_14 = 38, + STEDMA40_DEV_USB_OTG_IEP_7_15 = 39, + STEDMA40_DEV_SPI3_RX = 40, + STEDMA40_DEV_SD_MM3_RX = 41, + STEDMA40_DEV_SD_MM4_RX = 42, + STEDMA40_DEV_SD_MM5_RX = 43, + STEDMA40_DEV_SRC_SXA4_RX_TX = 44, + STEDMA40_DEV_SRC_SXA5_RX_TX = 45, + STEDMA40_DEV_SRC_SXA6_RX_TX = 46, + STEDMA40_DEV_SRC_SXA7_RX_TX = 47, + STEDMA40_DEV_CAC1_RX = 48, + /* RX channels 49 and 50 are unused */ + STEDMA40_DEV_MSHC_RX = 51, + STEDMA40_DEV_SLIM1_CH0_RX_HSI_RX_CH4 = 52, + STEDMA40_DEV_SLIM1_CH1_RX_HSI_RX_CH5 = 53, + STEDMA40_DEV_SLIM1_CH2_RX_HSI_RX_CH6 = 54, + STEDMA40_DEV_SLIM1_CH3_RX_HSI_RX_CH7 = 55, + /* RX channels 56 thru 60 are unused */ + STEDMA40_DEV_CAC0_RX = 61, + /* RX channels 62 and 63 are unused */ +}; + +enum dma_dest_dev_type { + STEDMA40_DEV_SPI0_TX = 0, + STEDMA40_DEV_SD_MMC0_TX = 1, + STEDMA40_DEV_SD_MMC1_TX = 2, + STEDMA40_DEV_SD_MMC2_TX = 3, + STEDMA40_DEV_I2C1_TX = 4, + STEDMA40_DEV_I2C3_TX = 5, + STEDMA40_DEV_I2C2_TX = 6, + STEDMA50_DEV_I2C4_TX = 7, /* Only on V1 */ + STEDMA40_DEV_SSP0_TX = 8, + STEDMA40_DEV_SSP1_TX = 9, + /* TX channel 10 is unused */ + STEDMA40_DEV_UART2_TX = 11, + STEDMA40_DEV_UART1_TX = 12, + STEDMA40_DEV_UART0_TX= 13, + STEDMA40_DEV_MSP2_TX = 14, + STEDMA40_DEV_I2C0_TX = 15, + STEDMA40_DEV_USB_OTG_OEP_8 = 16, + STEDMA40_DEV_USB_OTG_OEP_1_9 = 17, + STEDMA40_DEV_USB_OTG_OEP_2_10= 18, + STEDMA40_DEV_USB_OTG_OEP_3_11 = 19, + STEDMA40_DEV_SLIM0_CH0_TX_HSI_TX_CH0 = 20, + STEDMA40_DEV_SLIM0_CH1_TX_HSI_TX_CH1 = 21, + STEDMA40_DEV_SLIM0_CH2_TX_HSI_TX_CH2 = 22, + STEDMA40_DEV_SLIM0_CH3_TX_HSI_TX_CH3 = 23, + STEDMA40_DEV_DST_SXA0_RX_TX = 24, + STEDMA40_DEV_DST_SXA1_RX_TX = 25, + STEDMA40_DEV_DST_SXA2_RX_TX = 26, + STEDMA40_DEV_DST_SXA3_RX_TX = 27, + STEDMA40_DEV_SD_MM2_TX = 28, + STEDMA40_DEV_SD_MM0_TX = 29, + STEDMA40_DEV_MSP1_TX = 30, + /* + * This channel is either SlimBus or MSP, + * never both at the same time. 
+ */ + STEDMA40_SLIM0_CH0_TX = 31, + STEDMA40_DEV_MSP0_TX = 31, + STEDMA40_DEV_SD_MM1_TX = 32, + STEDMA40_DEV_SPI2_TX = 33, + /* Secondary I2C3 channel */ + STEDMA40_DEV_I2C3_TX2 = 34, + STEDMA40_DEV_SPI1_TX = 35, + STEDMA40_DEV_USB_OTG_OEP_4_12 = 36, + STEDMA40_DEV_USB_OTG_OEP_5_13 = 37, + STEDMA40_DEV_USB_OTG_OEP_6_14 = 38, + STEDMA40_DEV_USB_OTG_OEP_7_15 = 39, + STEDMA40_DEV_SPI3_TX = 40, + STEDMA40_DEV_SD_MM3_TX = 41, + STEDMA40_DEV_SD_MM4_TX = 42, + STEDMA40_DEV_SD_MM5_TX = 43, + STEDMA40_DEV_DST_SXA4_RX_TX = 44, + STEDMA40_DEV_DST_SXA5_RX_TX = 45, + STEDMA40_DEV_DST_SXA6_RX_TX = 46, + STEDMA40_DEV_DST_SXA7_RX_TX = 47, + STEDMA40_DEV_CAC1_TX = 48, + STEDMA40_DEV_CAC1_TX_HAC1_TX = 49, + STEDMA40_DEV_HAC1_TX = 50, + STEDMA40_MEMXCPY_TX_0 = 51, + STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52, + STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53, + STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54, + STEDMA40_DEV_SLIM1_CH3_TX_HSI_TX_CH7 = 55, + STEDMA40_MEMCPY_TX_1 = 56, + STEDMA40_MEMCPY_TX_2 = 57, + STEDMA40_MEMCPY_TX_3 = 58, + STEDMA40_MEMCPY_TX_4 = 59, + STEDMA40_MEMCPY_TX_5 = 60, + STEDMA40_DEV_CAC0_TX = 61, + STEDMA40_DEV_CAC0_TX_HAC0_TX = 62, + STEDMA40_DEV_HAC0_TX = 63, +}; + +#endif diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index dc2ac42d6319..393e9219a5b6 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -624,79 +624,58 @@ do { \ __raw_writel(l, base + reg); \ } while(0) -void omap_set_gpio_debounce(int gpio, int enable) +/** + * _set_gpio_debounce - low level gpio debounce time + * @bank: the gpio bank we're acting upon + * @gpio: the gpio number on this @gpio + * @debounce: debounce time to use + * + * OMAP's debounce time is in 31us steps so we need + * to convert and round up to the closest unit. + */ +static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, + unsigned debounce) { - struct gpio_bank *bank; - void __iomem *reg; - unsigned long flags; - u32 val, l = 1 << get_gpio_index(gpio); + void __iomem *reg = bank->base; + u32 val; + u32 l; + + if (debounce < 32) + debounce = 0x01; + else if (debounce > 7936) + debounce = 0xff; + else + debounce = (debounce / 0x1f) - 1; - if (cpu_class_is_omap1()) - return; + l = 1 << get_gpio_index(gpio); - bank = get_gpio_bank(gpio); - reg = bank->base; + if (cpu_is_omap44xx()) + reg += OMAP4_GPIO_DEBOUNCINGTIME; + else + reg += OMAP24XX_GPIO_DEBOUNCE_VAL; + + __raw_writel(debounce, reg); + reg = bank->base; if (cpu_is_omap44xx()) reg += OMAP4_GPIO_DEBOUNCENABLE; else reg += OMAP24XX_GPIO_DEBOUNCE_EN; - if (!(bank->mod_usage & l)) { - printk(KERN_ERR "GPIO %d not requested\n", gpio); - return; - } - - spin_lock_irqsave(&bank->lock, flags); val = __raw_readl(reg); - if (enable && !(val & l)) + if (debounce) { val |= l; - else if (!enable && (val & l)) - val &= ~l; - else - goto done; - - if (cpu_is_omap34xx() || cpu_is_omap44xx()) { - bank->dbck_enable_mask = val; - if (enable) + if (cpu_is_omap34xx() || cpu_is_omap44xx()) clk_enable(bank->dbck); - else + } else { + val &= ~l; + if (cpu_is_omap34xx() || cpu_is_omap44xx()) clk_disable(bank->dbck); } __raw_writel(val, reg); -done: - spin_unlock_irqrestore(&bank->lock, flags); } -EXPORT_SYMBOL(omap_set_gpio_debounce); - -void omap_set_gpio_debounce_time(int gpio, int enc_time) -{ - struct gpio_bank *bank; - void __iomem *reg; - - if (cpu_class_is_omap1()) - return; - - bank = get_gpio_bank(gpio); - reg = bank->base; - - if (!bank->mod_usage) { - printk(KERN_ERR "GPIO not requested\n"); - return; - } - - enc_time &= 0xff; - - if (cpu_is_omap44xx()) - 
reg += OMAP4_GPIO_DEBOUNCINGTIME; - else - reg += OMAP24XX_GPIO_DEBOUNCE_VAL; - - __raw_writel(enc_time, reg); -} -EXPORT_SYMBOL(omap_set_gpio_debounce_time); #ifdef CONFIG_ARCH_OMAP2PLUS static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, @@ -1656,6 +1635,20 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) return 0; } +static int gpio_debounce(struct gpio_chip *chip, unsigned offset, + unsigned debounce) +{ + struct gpio_bank *bank; + unsigned long flags; + + bank = container_of(chip, struct gpio_bank, chip); + spin_lock_irqsave(&bank->lock, flags); + _set_gpio_debounce(bank, offset, debounce); + spin_unlock_irqrestore(&bank->lock, flags); + + return 0; +} + static void gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct gpio_bank *bank; @@ -1909,6 +1902,7 @@ static int __init _omap_gpio_init(void) bank->chip.direction_input = gpio_input; bank->chip.get = gpio_get; bank->chip.direction_output = gpio_output; + bank->chip.set_debounce = gpio_debounce; bank->chip.set = gpio_set; bank->chip.to_irq = gpio_2irq; if (bank_is_mpuio(bank)) { diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig index 5cb2dd1da632..11d6a1bbd90d 100644 --- a/arch/arm/plat-s5p/Kconfig +++ b/arch/arm/plat-s5p/Kconfig @@ -29,3 +29,4 @@ config S5P_EXT_INT bool help Use the external interrupts (other than GPIO interrupts.) + Note: Do not choose this for S5P6440. diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-s5p/clock.c index 24a931fd8d3b..b5e255265f20 100644 --- a/arch/arm/plat-s5p/clock.c +++ b/arch/arm/plat-s5p/clock.c @@ -148,6 +148,7 @@ static struct clk *s5p_clks[] __initdata = { &clk_fout_vpll, &clk_arm, &clk_vpll, + &clk_xusbxti, }; void __init s5p_register_clocks(unsigned long xtal_freq) diff --git a/arch/arm/plat-s5p/include/plat/irqs.h b/arch/arm/plat-s5p/include/plat/irqs.h index 9ff3d718be39..3fb3a3a17465 100644 --- a/arch/arm/plat-s5p/include/plat/irqs.h +++ b/arch/arm/plat-s5p/include/plat/irqs.h @@ -87,4 +87,11 @@ #define IRQ_TIMER3 S5P_TIMER_IRQ(3) #define IRQ_TIMER4 S5P_TIMER_IRQ(4) +#define IRQ_EINT(x) ((x) < 16 ? ((x) + S5P_EINT_BASE1) \ + : ((x) - 16 + S5P_EINT_BASE2)) + +#define EINT_OFFSET(irq) ((irq) < S5P_EINT_BASE2 ? 
\ + ((irq) - S5P_EINT_BASE1) : \ + ((irq) + 16 - S5P_EINT_BASE2)) + #endif /* __ASM_PLAT_S5P_IRQS_H */ diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-s5p/irq-eint.c index eaa70aa0127b..e56c8075df97 100644 --- a/arch/arm/plat-s5p/irq-eint.c +++ b/arch/arm/plat-s5p/irq-eint.c @@ -60,7 +60,7 @@ static void s5p_irq_eint_maskack(unsigned int irq) static int s5p_irq_eint_set_type(unsigned int irq, unsigned int type) { - int offs = eint_offset(irq); + int offs = EINT_OFFSET(irq); int shift; u32 ctrl, mask; u32 newvalue = 0; @@ -139,17 +139,16 @@ static struct irq_chip s5p_irq_eint = { */ static inline void s5p_irq_demux_eint(unsigned int start) { - u32 status; + u32 status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start))); u32 mask = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start))); unsigned int irq; - status = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start))); status &= ~mask; status &= 0xff; while (status) { - irq = fls(status); - generic_handle_irq(irq - 1 + start); + irq = fls(status) - 1; + generic_handle_irq(irq + start); status &= ~(1 << irq); } } @@ -162,12 +161,18 @@ static void s5p_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc) static inline void s5p_irq_vic_eint_mask(unsigned int irq) { + void __iomem *base = get_irq_chip_data(irq); + s5p_irq_eint_mask(irq); + writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE_CLEAR); } static void s5p_irq_vic_eint_unmask(unsigned int irq) { + void __iomem *base = get_irq_chip_data(irq); + s5p_irq_eint_unmask(irq); + writel(1 << EINT_OFFSET(irq), base + VIC_INT_ENABLE); } static inline void s5p_irq_vic_eint_ack(unsigned int irq) diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h index 34efdd2b032c..db4112c6f2be 100644 --- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h +++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h @@ -43,6 +43,11 @@ struct s3c_gpio_chip; * layouts. Provide an point to vector control routine and provide any * per-bank configuration information that other systems such as the * external interrupt code will need. + * + * @sa s3c_gpio_cfgpin + * @sa s3c_gpio_getcfg + * @sa s3c_gpio_setpull + * @sa s3c_gpio_getpull */ struct s3c_gpio_cfg { unsigned int cfg_eint; @@ -70,11 +75,25 @@ struct s3c_gpio_cfg { /** * s3c_gpio_cfgpin() - Change the GPIO function of a pin. * @pin pin The pin number to configure. - * @pin to The configuration for the pin's function. + * @to to The configuration for the pin's function. * * Configure which function is actually connected to the external * pin, such as an gpio input, output or some form of special function * connected to an internal peripheral block. + * + * The @to parameter can be one of the generic S3C_GPIO_INPUT, S3C_GPIO_OUTPUT + * or S3C_GPIO_SFN() to indicate one of the possible values that the helper + * will then generate the correct bit mask and shift for the configuration. + * + * If a bank of GPIOs all needs to be set to special-function 2, then + * the following code will work: + * + * for (gpio = start; gpio < end; gpio++) + * s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(2)); + * + * The @to parameter can also be a specific value already shifted to the + * correct position in the control register, although these are discouraged + * in newer kernels and are only being kept for compatibility. */ extern int s3c_gpio_cfgpin(unsigned int pin, unsigned int to); @@ -108,6 +127,8 @@ extern unsigned s3c_gpio_getcfg(unsigned int pin); * This function sets the state of the pull-{up,down} resistor for the * specified pin. 
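The set_debounce hook wired into the OMAP gpio_chip above is reached through gpiolib, and _set_gpio_debounce() rounds the request into 31 us register units: anything under 32 us becomes the minimum value 0x01, anything over 7936 us saturates at 0xff, and a request of, say, 100 us ends up as (100 / 0x1f) - 1 = 2. A minimal consumer sketch, assuming the generic gpio_set_debounce() wrapper that accompanies this hook (the GPIO number and label are made up):

#include <linux/gpio.h>

/* Sketch: request ~100 us of debouncing on an arbitrary key GPIO.
 * gpiolib forwards the call to the chip's .set_debounce method,
 * i.e. to gpio_debounce()/_set_gpio_debounce() above on OMAP2+. */
static int example_setup_key_gpio(unsigned gpio)
{
	int err;

	err = gpio_request(gpio, "example-key");
	if (err)
		return err;

	err = gpio_direction_input(gpio);
	if (!err)
		err = gpio_set_debounce(gpio, 100);	/* time in microseconds */
	if (err)
		gpio_free(gpio);

	return err;
}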
It will return 0 if successfull, or a negative error * code if the pin cannot support the requested pull setting. + * + * @pull is one of S3C_GPIO_PULL_NONE, S3C_GPIO_PULL_DOWN or S3C_GPIO_PULL_UP. */ extern int s3c_gpio_setpull(unsigned int pin, s3c_gpio_pull_t pull); diff --git a/arch/avr32/include/asm/scatterlist.h b/arch/avr32/include/asm/scatterlist.h index 377320e3bd17..06394e5ead6c 100644 --- a/arch/avr32/include/asm/scatterlist.h +++ b/arch/avr32/include/asm/scatterlist.h @@ -1,25 +1,7 @@ #ifndef __ASM_AVR32_SCATTERLIST_H #define __ASM_AVR32_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -/* These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include #define ISA_DMA_THRESHOLD (0xffffffff) diff --git a/arch/blackfin/include/asm/scatterlist.h b/arch/blackfin/include/asm/scatterlist.h index 04f448711cd0..64d41d34ab0b 100644 --- a/arch/blackfin/include/asm/scatterlist.h +++ b/arch/blackfin/include/asm/scatterlist.h @@ -1,27 +1,7 @@ #ifndef _BLACKFIN_SCATTERLIST_H #define _BLACKFIN_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include #define ISA_DMA_THRESHOLD (0xffffffff) diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 43eb969405d1..6ec77685df52 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -292,28 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } -#ifdef CONFIG_BINFMT_ELF_FDPIC - case PTRACE_GETFDPIC: { - unsigned long tmp = 0; - - switch (addr) { - case_PTRACE_GETFDPIC_EXEC: - case PTRACE_GETFDPIC_EXEC: - tmp = child->mm->context.exec_fdpic_loadmap; - break; - case_PTRACE_GETFDPIC_INTERP: - case PTRACE_GETFDPIC_INTERP: - tmp = child->mm->context.interp_fdpic_loadmap; - break; - default: - break; - } - - ret = put_user(tmp, datap); - break; - } -#endif - /* when I and D space are separate, this will have to be fixed. 
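Putting the two Samsung helpers documented above together, a short sketch for one pin (the pin number is board-specific and purely illustrative):

#include <plat/gpio-cfg.h>

/* Sketch: route a pin to special-function 2 and enable its pull-up,
 * exactly as the s3c_gpio_cfgpin()/s3c_gpio_setpull() comments describe. */
static int example_cfg_eint_pin(unsigned int pin)
{
	int ret;

	ret = s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(2));
	if (ret)
		return ret;

	return s3c_gpio_setpull(pin, S3C_GPIO_PULL_UP);
}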
*/ case PTRACE_POKEDATA: pr_debug("ptrace: PTRACE_PEEKDATA\n"); @@ -357,8 +335,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) case PTRACE_PEEKUSR: switch (addr) { #ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */ - case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC; - case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP; + case PT_FDPIC_EXEC: + request = PTRACE_GETFDPIC; + addr = PTRACE_GETFDPIC_EXEC; + goto case_default; + case PT_FDPIC_INTERP: + request = PTRACE_GETFDPIC; + addr = PTRACE_GETFDPIC_INTERP; + goto case_default; #endif default: ret = get_reg(child, addr, datap); @@ -385,6 +369,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) 0, sizeof(struct pt_regs), (const void __user *)data); + case_default: default: ret = ptrace_request(child, request, addr, data); break; diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c index 77630df94343..884275629ef7 100644 --- a/arch/cris/arch-v10/drivers/ds1302.c +++ b/arch/cris/arch-v10/drivers/ds1302.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -238,9 +239,7 @@ static unsigned char days_in_mo[] = /* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */ -static int -rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, - unsigned long arg) +static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned long flags; @@ -354,6 +353,17 @@ rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, } } +static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = rtc_ioctl(file, cmd, arg); + unlock_kernel(); + + return ret; +} + static void print_rtc_status(void) { @@ -375,8 +385,8 @@ print_rtc_status(void) /* The various file operations we support. */ static const struct file_operations rtc_fops = { - .owner = THIS_MODULE, - .ioctl = rtc_ioctl, + .owner = THIS_MODULE, + .unlocked_ioctl = rtc_unlocked_ioctl, }; /* Probe for the chip by writing something to its RAM and try reading it back. */ diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c index 1e90c1a9c849..7dcb1f85f42b 100644 --- a/arch/cris/arch-v10/drivers/pcf8563.c +++ b/arch/cris/arch-v10/drivers/pcf8563.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,7 @@ static DEFINE_MUTEX(rtc_lock); /* Protect state etc */ static const unsigned char days_in_month[] = { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; -int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long); /* Cache VL bit value read at driver init since writing the RTC_SECOND * register clears the VL status. @@ -62,7 +63,7 @@ static int voltage_low; static const struct file_operations pcf8563_fops = { .owner = THIS_MODULE, - .ioctl = pcf8563_ioctl, + .unlocked_ioctl = pcf8563_unlocked_ioctl, }; unsigned char @@ -212,8 +213,7 @@ pcf8563_exit(void) * ioctl calls for this driver. Why return -ENOTTY upon error? Because * POSIX says so! */ -int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { /* Some sanity checks. 
*/ if (_IOC_TYPE(cmd) != RTC_MAGIC) @@ -339,6 +339,17 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } +static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = pcf8563_ioctl(filp, cmd, arg); + unlock_kernel(); + + return ret; +} + static int __init pcf8563_register(void) { if (pcf8563_init() < 0) { diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c index 1a61efc13982..a0c0df8be9c8 100644 --- a/arch/cris/arch-v10/kernel/irq.c +++ b/arch/cris/arch-v10/kernel/irq.c @@ -17,8 +17,8 @@ #include #include -#define mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr)); -#define unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr)); +#define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr)); +#define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr)); /* don't use set_int_vector, it bypasses the linux interrupt handlers. it is * global just so that the kernel gdb can use it. @@ -116,12 +116,12 @@ static unsigned int startup_crisv10_irq(unsigned int irq) static void enable_crisv10_irq(unsigned int irq) { - unmask_irq(irq); + crisv10_unmask_irq(irq); } static void disable_crisv10_irq(unsigned int irq) { - mask_irq(irq); + crisv10_mask_irq(irq); } static void ack_crisv10_irq(unsigned int irq) diff --git a/arch/cris/arch-v10/lib/dmacopy.c b/arch/cris/arch-v10/lib/dmacopy.c index e5fb44f505c5..49f5b8ca5b47 100644 --- a/arch/cris/arch-v10/lib/dmacopy.c +++ b/arch/cris/arch-v10/lib/dmacopy.c @@ -1,5 +1,4 @@ -/* $Id: dmacopy.c,v 1.1 2001/12/17 13:59:27 bjornw Exp $ - * +/* * memcpy for large blocks, using memory-memory DMA channels 6 and 7 in Etrax */ @@ -13,11 +12,11 @@ void *dma_memcpy(void *pdst, unsigned int pn) { static etrax_dma_descr indma, outdma; - - D(printk("dma_memcpy %d bytes... ", pn)); + + D(printk(KERN_DEBUG "dma_memcpy %d bytes... ", pn)); #if 0 - *R_GEN_CONFIG = genconfig_shadow = + *R_GEN_CONFIG = genconfig_shadow = (genconfig_shadow & ~0x3c0000) | IO_STATE(R_GEN_CONFIG, dma6, intdma7) | IO_STATE(R_GEN_CONFIG, dma7, intdma6); @@ -32,11 +31,11 @@ void *dma_memcpy(void *pdst, *R_DMA_CH7_FIRST = &outdma; *R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, start); *R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, start); - - while(*R_DMA_CH7_CMD == 1) /* wait for completion */ ; - D(printk("done\n")); + while (*R_DMA_CH7_CMD == 1) + /* wait for completion */; + D(printk(KERN_DEBUG "done\n")); } diff --git a/arch/cris/arch-v10/lib/hw_settings.S b/arch/cris/arch-v10/lib/hw_settings.S index 56905aaa7b6e..c09f19f478a5 100644 --- a/arch/cris/arch-v10/lib/hw_settings.S +++ b/arch/cris/arch-v10/lib/hw_settings.S @@ -1,13 +1,11 @@ /* - * $Id: hw_settings.S,v 1.1 2001/12/17 13:59:27 bjornw Exp $ - * * This table is used by some tools to extract hardware parameters. * The table should be included in the kernel and the decompressor. * Don't forget to update the tools if you change this table. 
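The ds1302, pcf8563 and i2c conversions in this series all follow the same pattern for retiring the locked .ioctl hook: keep the old handler, drop its inode argument, and wrap it in an unlocked_ioctl method that takes and releases the big kernel lock itself. A generic sketch for a hypothetical driver (the foo_* names are made up):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* The old handler, already stripped of its struct inode argument. */
static int foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;			/* placeholder body */
}

/* Wrapper preserving the old BKL semantics: take the lock, remember the
 * handler's return value, release the lock, then return that value. */
static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	lock_kernel();
	ret = foo_ioctl(file, cmd, arg);
	unlock_kernel();

	return ret;
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_unlocked_ioctl,
};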
* * Copyright (C) 2001 Axis Communications AB * - * Authors: Mikael Starvik (starvik@axis.com) + * Authors: Mikael Starvik (starvik@axis.com) */ #define PA_SET_VALUE ((CONFIG_ETRAX_DEF_R_PORT_PA_DIR << 8) | \ @@ -15,13 +13,13 @@ #define PB_SET_VALUE ((CONFIG_ETRAX_DEF_R_PORT_PB_CONFIG << 16) | \ (CONFIG_ETRAX_DEF_R_PORT_PB_DIR << 8) | \ (CONFIG_ETRAX_DEF_R_PORT_PB_DATA)) - + .ascii "HW_PARAM_MAGIC" ; Magic number .dword 0xc0004000 ; Kernel start address ; Debug port #ifdef CONFIG_ETRAX_DEBUG_PORT0 - .dword 0 + .dword 0 #elif defined(CONFIG_ETRAX_DEBUG_PORT1) .dword 1 #elif defined(CONFIG_ETRAX_DEBUG_PORT2) @@ -30,7 +28,7 @@ .dword 3 #else .dword 4 ; No debug -#endif +#endif ; SDRAM or EDO DRAM? #ifdef CONFIG_ETRAX_SDRAM @@ -39,7 +37,7 @@ .dword 0 #endif - ; Register values + ; Register values .dword R_WAITSTATES .dword CONFIG_ETRAX_DEF_R_WAITSTATES .dword R_BUS_CONFIG @@ -56,7 +54,7 @@ .dword CONFIG_ETRAX_DEF_R_DRAM_TIMING #endif .dword R_PORT_PA_SET - .dword PA_SET_VALUE + .dword PA_SET_VALUE .dword R_PORT_PB_SET .dword PB_SET_VALUE .dword 0 ; No more register values diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig index b9e328e688be..a2dd740c5907 100644 --- a/arch/cris/arch-v32/drivers/Kconfig +++ b/arch/cris/arch-v32/drivers/Kconfig @@ -360,24 +360,10 @@ config ETRAX_SER4_DSR_BIT string "Ser 4 DSR bit (empty = not used)" depends on ETRAX_SERIAL_PORT4 -config ETRAX_SER3_CD_BIT +config ETRAX_SER4_CD_BIT string "Ser 4 CD bit (empty = not used)" depends on ETRAX_SERIAL_PORT4 -config ETRAX_RS485 - bool "RS-485 support" - depends on ETRAXFS_SERIAL - help - Enables support for RS-485 serial communication. For a primer on - RS-485, see . - -config ETRAX_RS485_DISABLE_RECEIVER - bool "Disable serial receiver" - depends on ETRAX_RS485 - help - It is necessary to disable the serial receiver to avoid serial - loopback. Not all products are able to do this in software only. - config ETRAX_SYNCHRONOUS_SERIAL bool "Synchronous serial-port support" depends on ETRAX_ARCH_V32 diff --git a/arch/cris/arch-v32/drivers/i2c.c b/arch/cris/arch-v32/drivers/i2c.c index 506826399ae7..2fd6a740d895 100644 --- a/arch/cris/arch-v32/drivers/i2c.c +++ b/arch/cris/arch-v32/drivers/i2c.c @@ -649,10 +649,10 @@ i2c_release(struct inode *inode, struct file *filp) /* Main device API. ioctl's to write or read to/from i2c registers. 
*/ -static int -i2c_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long +i2c_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + int ret; if(_IOC_TYPE(cmd) != ETRAXI2C_IOCTYPE) { return -ENOTTY; } @@ -665,9 +665,13 @@ i2c_ioctl(struct inode *inode, struct file *file, I2C_ARGREG(arg), I2C_ARGVALUE(arg))); - return i2c_writereg(I2C_ARGSLAVE(arg), + lock_kernel(); + ret = i2c_writereg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg), I2C_ARGVALUE(arg)); + unlock_kernel(); + return ret; + case I2C_READREG: { unsigned char val; @@ -675,7 +679,9 @@ i2c_ioctl(struct inode *inode, struct file *file, D(printk("i2cr %d %d ", I2C_ARGSLAVE(arg), I2C_ARGREG(arg))); + lock_kernel(); val = i2c_readreg(I2C_ARGSLAVE(arg), I2C_ARGREG(arg)); + unlock_kernel(); D(printk("= %d\n", val)); return val; } @@ -688,10 +694,10 @@ i2c_ioctl(struct inode *inode, struct file *file, } static const struct file_operations i2c_fops = { - .owner = THIS_MODULE, - .ioctl = i2c_ioctl, - .open = i2c_open, - .release = i2c_release, + .owner = THIS_MODULE, + .unlocked_ioctl = i2c_ioctl, + .open = i2c_open, + .release = i2c_release, }; static int __init i2c_init(void) diff --git a/arch/cris/arch-v32/drivers/pcf8563.c b/arch/cris/arch-v32/drivers/pcf8563.c index f4478506e52c..bef6eb53b153 100644 --- a/arch/cris/arch-v32/drivers/pcf8563.c +++ b/arch/cris/arch-v32/drivers/pcf8563.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -49,7 +50,7 @@ static DEFINE_MUTEX(rtc_lock); /* Protect state etc */ static const unsigned char days_in_month[] = { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; -int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* Cache VL bit value read at driver init since writing the RTC_SECOND * register clears the VL status. @@ -57,8 +58,8 @@ int pcf8563_ioctl(struct inode *, struct file *, unsigned int, unsigned long); static int voltage_low; static const struct file_operations pcf8563_fops = { - .owner = THIS_MODULE, - .ioctl = pcf8563_ioctl + .owner = THIS_MODULE, + .unlocked_ioctl = pcf8563_unlocked_ioctl, }; unsigned char @@ -208,8 +209,7 @@ pcf8563_exit(void) * ioctl calls for this driver. Why return -ENOTTY upon error? Because * POSIX says so! */ -int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { /* Some sanity checks. 
*/ if (_IOC_TYPE(cmd) != RTC_MAGIC) @@ -335,6 +335,17 @@ int pcf8563_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } +static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret; + + lock_kernel(); + ret = pcf8563_ioctl(filp, cmd, arg); + unlock_kernel(); + + return ret; +} + static int __init pcf8563_register(void) { if (pcf8563_init() < 0) { diff --git a/arch/cris/arch-v32/kernel/crisksyms.c b/arch/cris/arch-v32/kernel/crisksyms.c index 64933e2c0f5b..bde8d1a10cad 100644 --- a/arch/cris/arch-v32/kernel/crisksyms.c +++ b/arch/cris/arch-v32/kernel/crisksyms.c @@ -24,5 +24,5 @@ EXPORT_SYMBOL(crisv32_io_get_name); EXPORT_SYMBOL(crisv32_io_get); /* Functions masking/unmasking interrupts */ -EXPORT_SYMBOL(mask_irq); -EXPORT_SYMBOL(unmask_irq); +EXPORT_SYMBOL(crisv32_mask_irq); +EXPORT_SYMBOL(crisv32_unmask_irq); diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c index b6241198fb98..0b1febe44aa3 100644 --- a/arch/cris/arch-v32/kernel/irq.c +++ b/arch/cris/arch-v32/kernel/irq.c @@ -280,8 +280,7 @@ out: return cpu; } -void -mask_irq(int irq) +void crisv32_mask_irq(int irq) { int cpu; @@ -289,8 +288,7 @@ mask_irq(int irq) block_irq(irq, cpu); } -void -unmask_irq(int irq) +void crisv32_unmask_irq(int irq) { unblock_irq(irq, irq_cpu(irq)); } @@ -298,23 +296,23 @@ unmask_irq(int irq) static unsigned int startup_crisv32_irq(unsigned int irq) { - unmask_irq(irq); + crisv32_unmask_irq(irq); return 0; } static void shutdown_crisv32_irq(unsigned int irq) { - mask_irq(irq); + crisv32_mask_irq(irq); } static void enable_crisv32_irq(unsigned int irq) { - unmask_irq(irq); + crisv32_unmask_irq(irq); } static void disable_crisv32_irq(unsigned int irq) { - mask_irq(irq); + crisv32_mask_irq(irq); } static void ack_crisv32_irq(unsigned int irq) diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 058adddf4e4b..84fed3b4b079 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -168,8 +168,8 @@ void __init smp_callin(void) /* Enable IRQ and idle */ REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask); - unmask_irq(IPI_INTR_VECT); - unmask_irq(TIMER0_INTR_VECT); + crisv32_unmask_irq(IPI_INTR_VECT); + crisv32_unmask_irq(TIMER0_INTR_VECT); preempt_disable(); notify_cpu_starting(cpu); local_irq_enable(); diff --git a/arch/cris/include/arch-v10/arch/irq.h b/arch/cris/include/arch-v10/arch/irq.h index 6248004eca1c..7d345947b3ee 100644 --- a/arch/cris/include/arch-v10/arch/irq.h +++ b/arch/cris/include/arch-v10/arch/irq.h @@ -93,15 +93,16 @@ void set_break_vector(int n, irqvectptr addr); "push $r10\n\t" /* push orig_r10 */ \ "clear.d [$sp=$sp-4]\n\t" /* frametype - this is a normal stackframe */ - /* BLOCK_IRQ and UNBLOCK_IRQ do the same as mask_irq and unmask_irq */ +/* BLOCK_IRQ and UNBLOCK_IRQ do the same as + * crisv10_mask_irq and crisv10_unmask_irq */ #define BLOCK_IRQ(mask,nr) \ "move.d " #mask ",$r0\n\t" \ - "move.d $r0,[0xb00000d8]\n\t" - + "move.d $r0,[0xb00000d8]\n\t" + #define UNBLOCK_IRQ(mask) \ "move.d " #mask ",$r0\n\t" \ - "move.d $r0,[0xb00000dc]\n\t" + "move.d $r0,[0xb00000dc]\n\t" #define IRQ_NAME2(nr) nr##_interrupt(void) #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr) diff --git a/arch/cris/include/arch-v32/arch/irq.h b/arch/cris/include/arch-v32/arch/irq.h index 9e4c9fbdfddf..b31e9984f849 100644 --- a/arch/cris/include/arch-v32/arch/irq.h +++ b/arch/cris/include/arch-v32/arch/irq.h @@ -23,8 +23,8 @@ struct etrax_interrupt_vector { extern struct 
etrax_interrupt_vector *etrax_irv; /* head.S */ -void mask_irq(int irq); -void unmask_irq(int irq); +void crisv32_mask_irq(int irq); +void crisv32_unmask_irq(int irq); void set_exception_vector(int n, irqvectptr addr); diff --git a/arch/cris/include/asm/param.h b/arch/cris/include/asm/param.h index 0e47994e40be..484fcf8667c0 100644 --- a/arch/cris/include/asm/param.h +++ b/arch/cris/include/asm/param.h @@ -2,22 +2,9 @@ #define _ASMCRIS_PARAM_H /* Currently we assume that HZ=100 is good for CRIS. */ -#ifdef __KERNEL__ -# define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#endif - -#ifndef HZ -#define HZ 100 -#endif #define EXEC_PAGESIZE 8192 -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ +#include -#endif +#endif /* _ASMCRIS_PARAM_H */ diff --git a/arch/cris/include/asm/scatterlist.h b/arch/cris/include/asm/scatterlist.h index faff53ad1f96..249a7842ff5f 100644 --- a/arch/cris/include/asm/scatterlist.h +++ b/arch/cris/include/asm/scatterlist.h @@ -1,22 +1,7 @@ #ifndef __ASM_CRIS_SCATTERLIST_H #define __ASM_CRIS_SCATTERLIST_H -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - char * address; /* Location data is to be transferred to */ - unsigned int length; - - /* The following is i386 highmem junk - not used by us */ - unsigned long page_link; - unsigned int offset;/* for highmem, page offset */ - -}; - -#define sg_dma_address(sg) ((sg)->address) -#define sg_dma_len(sg) ((sg)->length) -/* i386 junk */ +#include #define ISA_DMA_THRESHOLD (0x1fffffff) diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h index 7dc0f0f85b7c..2797163b8f4f 100644 --- a/arch/frv/include/asm/cache.h +++ b/arch/frv/include/asm/cache.h @@ -17,8 +17,6 @@ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES - #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) diff --git a/arch/frv/include/asm/mem-layout.h b/arch/frv/include/asm/mem-layout.h index 2947764fc0e0..ccae981876fa 100644 --- a/arch/frv/include/asm/mem-layout.h +++ b/arch/frv/include/asm/mem-layout.h @@ -35,8 +35,8 @@ * the slab must be aligned such that load- and store-double instructions don't * fault if used */ -#define ARCH_KMALLOC_MINALIGN 8 -#define ARCH_SLAB_MINALIGN 8 +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES +#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES /*****************************************************************************/ /* diff --git a/arch/frv/include/asm/scatterlist.h b/arch/frv/include/asm/scatterlist.h index 4bca8a28546c..1614bfd7e3a4 100644 --- a/arch/frv/include/asm/scatterlist.h +++ b/arch/frv/include/asm/scatterlist.h @@ -1,45 +1,7 @@ #ifndef _ASM_SCATTERLIST_H #define _ASM_SCATTERLIST_H -#include - -/* - * Drivers must set either ->address or (preferred) page and ->offset - * to indicate where data must be transferred to/from. - * - * Using page is recommended since it handles highmem data as well as - * low mem. ->address is restricted to data which has a virtual mapping, and - * it will go away in the future. Updating to page can be automated very - * easily -- something like - * - * sg->address = some_ptr; - * - * can be rewritten as - * - * sg_set_buf(sg, some_ptr, length); - * - * and that's it. 
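The comment being removed above points drivers at sg_set_buf(); with the per-architecture definitions now deferring to the generic scatterlist code, the usual single-buffer idiom is sg_init_one(), which combines sg_init_table() and sg_set_buf(). A minimal sketch:

#include <linux/scatterlist.h>

/* Sketch: describe one kernel buffer with a one-entry scatterlist,
 * filling in the page, offset and length fields for the caller. */
static void example_fill_sg(struct scatterlist *sg, void *buf, unsigned int len)
{
	sg_init_one(sg, buf, len);
}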
There's no excuse for not highmem enabling YOUR driver. /jens - */ -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; /* for highmem, page offset */ - - dma_addr_t dma_address; - unsigned int length; -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include #define ISA_DMA_THRESHOLD (0xffffffffUL) diff --git a/arch/frv/kernel/break.S b/arch/frv/kernel/break.S index bd0bdf908d93..cbb6958a3147 100644 --- a/arch/frv/kernel/break.S +++ b/arch/frv/kernel/break.S @@ -21,7 +21,7 @@ # # the break handler has its own stack # - .section .bss.stack + .section .bss..stack .globl __break_user_context .balign THREAD_SIZE __break_stack: @@ -63,7 +63,7 @@ __break_trace_through_exceptions: # entry point for Break Exceptions/Interrupts # ############################################################################### - .section .text.break + .section .text..break .balign 4 .globl __entry_break __entry_break: diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S index 189397ec012a..63d579bf1c29 100644 --- a/arch/frv/kernel/entry.S +++ b/arch/frv/kernel/entry.S @@ -38,7 +38,7 @@ #define nr_syscalls ((syscall_table_size)/4) - .section .text.entry + .section .text..entry .balign 4 .macro LEDS val diff --git a/arch/frv/kernel/head.S b/arch/frv/kernel/head.S index b825ef3f2d54..e9a8cc63ac94 100644 --- a/arch/frv/kernel/head.S +++ b/arch/frv/kernel/head.S @@ -542,7 +542,7 @@ __head_end: .size _boot, .-_boot # provide a point for GDB to place a break - .section .text.start,"ax" + .section .text..start,"ax" .globl _start .balign 4 _start: diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c index 60eeed3694c0..fac028936a04 100644 --- a/arch/frv/kernel/ptrace.c +++ b/arch/frv/kernel/ptrace.c @@ -344,26 +344,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) 0, sizeof(child->thread.user->f), (const void __user *)data); - case PTRACE_GETFDPIC: - tmp = 0; - switch (addr) { - case PTRACE_GETFDPIC_EXEC: - tmp = child->mm->context.exec_fdpic_loadmap; - break; - case PTRACE_GETFDPIC_INTERP: - tmp = child->mm->context.interp_fdpic_loadmap; - break; - default: - break; - } - - ret = 0; - if (put_user(tmp, (unsigned long *) data)) { - ret = -EFAULT; - break; - } - break; - default: ret = ptrace_request(child, request, addr, data); break; diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c index 71abd1510a59..6c155d69da29 100644 --- a/arch/frv/kernel/sysctl.c +++ b/arch/frv/kernel/sysctl.c @@ -46,8 +46,9 @@ static void frv_change_dcache_mode(unsigned long newmode) /* * handle requests to dynamically switch the write caching mode delivered by /proc */ -static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int procctl_frv_cachemode(ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) { unsigned long hsr0; char buff[8]; @@ -84,7 +85,7 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp, } /* read the state */ - if (filp->f_pos > 0) { + if (*ppos > 0) { *lenp = 0; return 0; } @@ -110,7 +111,7 @@ static int 
procctl_frv_cachemode(ctl_table *table, int write, struct file *filp, return -EFAULT; *lenp = len; - filp->f_pos = len; + *ppos = len; return 0; } /* end procctl_frv_cachemode() */ @@ -120,8 +121,9 @@ static int procctl_frv_cachemode(ctl_table *table, int write, struct file *filp, * permit the mm_struct the nominated process is using have its MMU context ID pinned */ #ifdef CONFIG_MMU -static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int procctl_frv_pin_cxnr(ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) { pid_t pid; char buff[16], *p; @@ -150,7 +152,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp, } /* read the currently pinned CXN */ - if (filp->f_pos > 0) { + if (*ppos > 0) { *lenp = 0; return 0; } @@ -163,7 +165,7 @@ static int procctl_frv_pin_cxnr(ctl_table *table, int write, struct file *filp, return -EFAULT; *lenp = len; - filp->f_pos = len; + *ppos = len; return 0; } /* end procctl_frv_pin_cxnr() */ diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index cbe811fccfcc..8b973f3cc90e 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -57,10 +57,10 @@ SECTIONS _text = .; _stext = .; .text : { - *(.text.start) - *(.text.entry) - *(.text.break) - *(.text.tlbmiss) + *(.text..start) + *(.text..entry) + *(.text..break) + *(.text..tlbmiss) TEXT_TEXT SCHED_TEXT LOCK_TEXT @@ -114,7 +114,7 @@ SECTIONS .sbss : { *(.sbss .sbss.*) } .bss : { *(.bss .bss.*) } - .bss.stack : { *(.bss) } + .bss..stack : { *(.bss) } __bss_stop = .; _end = . ; diff --git a/arch/frv/mm/tlb-miss.S b/arch/frv/mm/tlb-miss.S index 7f392bc651a3..f3ac019bb18b 100644 --- a/arch/frv/mm/tlb-miss.S +++ b/arch/frv/mm/tlb-miss.S @@ -15,7 +15,7 @@ #include #include - .section .text.tlbmiss + .section .text..tlbmiss .balign 4 .globl __entry_insn_mmu_miss diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S index 985a81a2435a..10e9a2d1cc6c 100644 --- a/arch/h8300/boot/compressed/head.S +++ b/arch/h8300/boot/compressed/head.S @@ -9,7 +9,7 @@ #define SRAM_START 0xff4000 - .section .text.startup + .section .text..startup .global startup startup: mov.l #SRAM_START+0x8000, sp diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds index 65e2a0d1ae39..a0a3a0ed54ef 100644 --- a/arch/h8300/boot/compressed/vmlinux.lds +++ b/arch/h8300/boot/compressed/vmlinux.lds @@ -4,7 +4,7 @@ SECTIONS { __stext = . ; __text = .; - *(.text.startup) + *(.text..startup) *(.text) __etext = . 
; } diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h index d3ecdd87ac90..de08a4a2cc1c 100644 --- a/arch/h8300/include/asm/scatterlist.h +++ b/arch/h8300/include/asm/scatterlist.h @@ -1,17 +1,7 @@ #ifndef _H8300_SCATTERLIST_H #define _H8300_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; +#include #define ISA_DMA_THRESHOLD (0xffffffff) diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9676100b83ee..95610820041e 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -56,6 +56,9 @@ config MMU config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config SWIOTLB bool @@ -495,6 +498,14 @@ config HAVE_ARCH_NODEDATA_EXTENSION def_bool y depends on NUMA +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config HAVE_MEMORYLESS_NODES + def_bool y + depends on NUMA + config ARCH_PROC_KCORE_TEXT def_bool y depends on PROC_KCORE diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 21adbd7f90f8..837dc82a013e 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -94,7 +94,6 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ -#define acpi_ht 0 /* no HT-only mode on IA64 */ #endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } diff --git a/arch/ia64/include/asm/asmmacro.h b/arch/ia64/include/asm/asmmacro.h index c1642fd64029..3ab6d75aa3db 100644 --- a/arch/ia64/include/asm/asmmacro.h +++ b/arch/ia64/include/asm/asmmacro.h @@ -70,12 +70,12 @@ name: * path (ivt.S - TLB miss processing) or in places where it might not be * safe to use a "tpa" instruction (mca_asm.S - error recovery). */ - .section ".data.patch.vtop", "a" // declare section & section attributes + .section ".data..patch.vtop", "a" // declare section & section attributes .previous #define LOAD_PHYSICAL(pr, reg, obj) \ [1:](pr)movl reg = obj; \ - .xdata4 ".data.patch.vtop", 1b-. + .xdata4 ".data..patch.vtop", 1b-. /* * For now, we always put in the McKinley E9 workaround. On CPUs that don't need it, @@ -84,11 +84,11 @@ name: #define DO_MCKINLEY_E9_WORKAROUND #ifdef DO_MCKINLEY_E9_WORKAROUND - .section ".data.patch.mckinley_e9", "a" + .section ".data..patch.mckinley_e9", "a" .previous /* workaround for Itanium 2 Errata 9: */ # define FSYS_RETURN \ - .xdata4 ".data.patch.mckinley_e9", 1f-.; \ + .xdata4 ".data..patch.mckinley_e9", 1f-.; \ 1:{ .mib; \ nop.m 0; \ mov r16=ar.pfs; \ @@ -107,11 +107,11 @@ name: * If physical stack register size is different from DEF_NUM_STACK_REG, * dynamically patch the kernel for correct size. */ - .section ".data.patch.phys_stack_reg", "a" + .section ".data..patch.phys_stack_reg", "a" .previous #define LOAD_PHYS_STACK_REG_SIZE(reg) \ [1:] adds reg=IA64_NUM_PHYS_STACK_REG*8+8,r0; \ - .xdata4 ".data.patch.phys_stack_reg", 1b-. + .xdata4 ".data..patch.phys_stack_reg", 1b-. /* * Up until early 2004, use of .align within a function caused bad unwind info. 
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h index e7482bd628ff..988254a7d349 100644 --- a/arch/ia64/include/asm/cache.h +++ b/arch/ia64/include/asm/cache.h @@ -24,6 +24,6 @@ # define SMP_CACHE_BYTES (1 << 3) #endif -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif /* _ASM_IA64_CACHE_H */ diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h index 1bd408265694..14aa1c58912b 100644 --- a/arch/ia64/include/asm/percpu.h +++ b/arch/ia64/include/asm/percpu.h @@ -31,7 +31,7 @@ extern void *per_cpu_init(void); #endif /* SMP */ -#define PER_CPU_BASE_SECTION ".data.percpu" +#define PER_CPU_BASE_SECTION ".data..percpu" /* * Be extremely careful when taking the address of this variable! Due to virtual diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h index d8e98961dec7..f299a4fb25c8 100644 --- a/arch/ia64/include/asm/scatterlist.h +++ b/arch/ia64/include/asm/scatterlist.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_SCATTERLIST_H #define _ASM_IA64_SCATTERLIST_H +#include /* * It used to be that ISA_DMA_THRESHOLD had something to do with the * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart @@ -10,7 +11,6 @@ * that's 4GB - 1. */ #define ISA_DMA_THRESHOLD 0xffffffff - -#include +#define ARCH_HAS_SG_CHAIN #endif /* _ASM_IA64_SCATTERLIST_H */ diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index d323071d0f91..09f646753d1a 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -25,11 +25,6 @@ */ #define RECLAIM_DISTANCE 15 -/* - * Returns the number of the node containing CPU 'cpu' - */ -#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu]) - /* * Returns a bitmask of CPUs on Node 'node'. */ diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate index ab9b03a9adcc..ceeffc509764 100644 --- a/arch/ia64/kernel/Makefile.gate +++ b/arch/ia64/kernel/Makefile.gate @@ -21,7 +21,7 @@ GATECFLAGS_gate-syms.o = -r $(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE $(call if_changed,gate) -# gate-data.o contains the gate DSO image as data in section .data.gate. +# gate-data.o contains the gate DSO image as data in section .data..gate. # We must build gate.so before we can assemble it. # Note: kbuild does not track this dependency due to usage of .incbin $(obj)/gate-data.o: $(obj)/gate.so diff --git a/arch/ia64/kernel/gate-data.S b/arch/ia64/kernel/gate-data.S index 258c0a3238fb..b3ef1c72e132 100644 --- a/arch/ia64/kernel/gate-data.S +++ b/arch/ia64/kernel/gate-data.S @@ -1,3 +1,3 @@ - .section .data.gate, "aw" + .section .data..gate, "aw" .incbin "arch/ia64/kernel/gate.so" diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S index cf5e0a105e16..245d3e1ec7e1 100644 --- a/arch/ia64/kernel/gate.S +++ b/arch/ia64/kernel/gate.S @@ -21,18 +21,18 @@ * to targets outside the shared object) and to avoid multi-phase kernel builds, we * simply create minimalistic "patch lists" in special ELF sections. */ - .section ".data.patch.fsyscall_table", "a" + .section ".data..patch.fsyscall_table", "a" .previous #define LOAD_FSYSCALL_TABLE(reg) \ [1:] movl reg=0; \ - .xdata4 ".data.patch.fsyscall_table", 1b-. + .xdata4 ".data..patch.fsyscall_table", 1b-. 
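These hunks are part of the kernel-wide switch to double-dot section names (.data..read_mostly, .data..percpu, .data..patch.*, and the .text../.bss.. variants seen earlier); the usual motivation is to keep kernel-named sections from colliding with the per-symbol .data.<symbol>/.text.<symbol> sections the compiler can generate under -fdata-sections/-ffunction-sections. As a trivial sketch of what lands in one of the renamed sections:

#include <linux/cache.h>

/* Sketch: a __read_mostly variable is now emitted into the
 * ".data..read_mostly" section named in the cache.h hunk above. */
static unsigned long example_threshold __read_mostly = 4096;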
- .section ".data.patch.brl_fsys_bubble_down", "a" + .section ".data..patch.brl_fsys_bubble_down", "a" .previous #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \ [1:](pr)brl.cond.sptk 0; \ ;; \ - .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-. + .xdata4 ".data..patch.brl_fsys_bubble_down", 1b-. GLOBAL_ENTRY(__kernel_syscall_via_break) .prologue diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S index 88c64ed47c36..d32b0855110a 100644 --- a/arch/ia64/kernel/gate.lds.S +++ b/arch/ia64/kernel/gate.lds.S @@ -33,21 +33,21 @@ SECTIONS */ . = GATE_ADDR + 0x600; - .data.patch : { + .data..patch : { __paravirt_start_gate_mckinley_e9_patchlist = .; - *(.data.patch.mckinley_e9) + *(.data..patch.mckinley_e9) __paravirt_end_gate_mckinley_e9_patchlist = .; __paravirt_start_gate_vtop_patchlist = .; - *(.data.patch.vtop) + *(.data..patch.vtop) __paravirt_end_gate_vtop_patchlist = .; __paravirt_start_gate_fsyscall_patchlist = .; - *(.data.patch.fsyscall_table) + *(.data..patch.fsyscall_table) __paravirt_end_gate_fsyscall_patchlist = .; __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .; - *(.data.patch.brl_fsys_bubble_down) + *(.data..patch.brl_fsys_bubble_down) __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .; } :readable diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index e253ab8fcbc8..f9efe9739d3f 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -23,7 +23,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * Initial task structure. * * We need to make sure that this is properly aligned due to the way process stacks are - * handled. This is done by having a special ".data.init_task" section... + * handled. This is done by having a special ".data..init_task" section... */ #define init_thread_info init_task_mem.s.thread_info diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index 179fd122e837..d93e396bf599 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -82,7 +82,7 @@ mov r19=n;; /* prepare to save predicates */ \ br.sptk.many dispatch_to_fault_handler - .section .text.ivt,"ax" + .section .text..ivt,"ax" .align 32768 // align on 32KB boundary .global ia64_ivt diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index 292e214a3b84..d56753a11636 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h @@ -16,7 +16,7 @@ #define ACCOUNT_SYS_ENTER #endif -.section ".data.patch.rse", "a" +.section ".data..patch.rse", "a" .previous /* @@ -215,7 +215,7 @@ (pUStk) extr.u r17=r18,3,6; \ (pUStk) sub r16=r18,r22; \ [1:](pKStk) br.cond.sptk.many 1f; \ - .xdata4 ".data.patch.rse",1b-. \ + .xdata4 ".data..patch.rse",1b-. 
\ ;; \ cmp.ge p6,p7 = 33,r17; \ ;; \ diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S index 6158560d7f17..92d880c4d3d1 100644 --- a/arch/ia64/kernel/paravirtentry.S +++ b/arch/ia64/kernel/paravirtentry.S @@ -28,7 +28,7 @@ #include "entry.h" #define DATA8(sym, init_value) \ - .pushsection .data.read_mostly ; \ + .pushsection .data..read_mostly ; \ .align 8 ; \ .global sym ; \ sym: ; \ diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 3095654f9ab3..d9485d952ed0 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -31,8 +31,6 @@ struct dma_map_ops swiotlb_dma_ops = { .unmap_sg = swiotlb_unmap_sg_attrs, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, - .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .dma_supported = swiotlb_dma_supported, diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 0dec7f702448..7c7909f9bc93 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -638,7 +638,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) */ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_STOPPED && !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { @@ -662,7 +662,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) * job control stop, so that SIGCONT can be used to wake it up. */ read_lock(&tasklist_lock); - if (child->signal) { + if (child->sighand) { spin_lock_irq(&child->sighand->siglock); if (child->state == TASK_TRACED && (child->signal->flags & SIGNAL_STOP_STOPPED)) { diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e5230b2ff2c5..6a1380e90f87 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -390,6 +390,14 @@ smp_callin (void) fix_b0_for_bsp(); +#ifdef CONFIG_NUMA + /* + * numa_node_id() works after this. 
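As the comment being added here notes, numa_node_id() works once the set_numa_node() call that follows has run on the booting CPU. A small sketch of what that enables (illustrative only, not taken from the patch):

#include <linux/gfp.h>
#include <linux/topology.h>

/* Sketch: allocate one page from the calling CPU's local node, relying
 * on numa_node_id() being valid after the set_numa_node() call below. */
static struct page *example_alloc_local_page(void)
{
	return alloc_pages_node(numa_node_id(), GFP_KERNEL, 0);
}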
+ */ + set_numa_node(cpu_to_node_map[cpuid]); + set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); +#endif + ipi_call_lock_irq(); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ @@ -632,6 +640,9 @@ void __devinit smp_prepare_boot_cpu(void) { cpu_set(smp_processor_id(), cpu_online_map); cpu_set(smp_processor_id(), cpu_callin_map); +#ifdef CONFIG_NUMA + set_numa_node(cpu_to_node_map[smp_processor_id()]); +#endif per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; paravirt_post_smp_prepare_boot_cpu(); } diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 1295ba327f6f..e07218a2577f 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -8,7 +8,7 @@ #define IVT_TEXT \ VMLINUX_SYMBOL(__start_ivt_text) = .; \ - *(.text.ivt) \ + *(.text..ivt) \ VMLINUX_SYMBOL(__end_ivt_text) = .; OUTPUT_FORMAT("elf64-ia64-little") @@ -54,8 +54,8 @@ SECTIONS .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP - .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) - { *(.text.lock) } + .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) + { *(.text..lock) } #endif _etext = .; @@ -75,10 +75,10 @@ SECTIONS __stop___mca_table = .; } - .data.patch.phys_stack_reg : AT(ADDR(.data.patch.phys_stack_reg) - LOAD_OFFSET) + .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) { __start___phys_stack_reg_patchlist = .; - *(.data.patch.phys_stack_reg) + *(.data..patch.phys_stack_reg) __end___phys_stack_reg_patchlist = .; } @@ -110,24 +110,24 @@ SECTIONS INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) - .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) + .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) { __start___vtop_patchlist = .; - *(.data.patch.vtop) + *(.data..patch.vtop) __end___vtop_patchlist = .; } - .data.patch.rse : AT(ADDR(.data.patch.rse) - LOAD_OFFSET) + .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) { __start___rse_patchlist = .; - *(.data.patch.rse) + *(.data..patch.rse) __end___rse_patchlist = .; } - .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) + .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) { __start___mckinley_e9_bundles = .; - *(.data.patch.mckinley_e9) + *(.data..patch.mckinley_e9) __end___mckinley_e9_bundles = .; } @@ -175,17 +175,17 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) + .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __start_gate_section = .; - *(.data.gate) + *(.data..gate) __stop_gate_section = .; #ifdef CONFIG_XEN . 
= ALIGN(PAGE_SIZE); __xen_start_gate_section = .; - *(.data.gate.xen) + *(.data..gate.xen) __xen_stop_gate_section = .; #endif } diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S index 40920c630649..24018484c6e9 100644 --- a/arch/ia64/kvm/vmm_ivt.S +++ b/arch/ia64/kvm/vmm_ivt.S @@ -104,7 +104,7 @@ GLOBAL_ENTRY(kvm_vmm_panic) br.call.sptk.many b6=vmm_panic_handler; END(kvm_vmm_panic) - .section .text.ivt,"ax" + .section .text..ivt,"ax" .align 32768 // align on 32KB boundary .global kvm_ia64_ivt diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 64aff520b899..aa2533ae7e9e 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -335,8 +335,11 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl) } struct pci_bus * __devinit -pci_acpi_scan_root(struct acpi_device *device, int domain, int bus) +pci_acpi_scan_root(struct acpi_pci_root *root) { + struct acpi_device *device = root->device; + int domain = root->segment; + int bus = root->secondary.start; struct pci_controller *controller; unsigned int windows = 0; struct pci_bus *pbus; diff --git a/arch/ia64/scripts/unwcheck.py b/arch/ia64/scripts/unwcheck.py index c27849889e19..2bfd941ff7c7 100644 --- a/arch/ia64/scripts/unwcheck.py +++ b/arch/ia64/scripts/unwcheck.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # # Usage: unwcheck.py FILE # diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S index 7d4830afc91d..6f95b6b32a4e 100644 --- a/arch/ia64/xen/gate-data.S +++ b/arch/ia64/xen/gate-data.S @@ -1,3 +1,3 @@ - .section .data.gate.xen, "aw" + .section .data..gate.xen, "aw" .incbin "arch/ia64/xen/gate.so" diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S index aff8346ea193..b820ed02ab9f 100644 --- a/arch/ia64/xen/xensetup.S +++ b/arch/ia64/xen/xensetup.S @@ -14,7 +14,7 @@ #include #include - .section .data.read_mostly + .section .data..read_mostly .align 8 .global xen_domain_type xen_domain_type: diff --git a/arch/m32r/include/asm/scatterlist.h b/arch/m32r/include/asm/scatterlist.h index 1ed372c73d0b..aeeddd8dac17 100644 --- a/arch/m32r/include/asm/scatterlist.h +++ b/arch/m32r/include/asm/scatterlist.h @@ -1,20 +1,7 @@ #ifndef _ASM_M32R_SCATTERLIST_H #define _ASM_M32R_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - char * address; /* Location data is to be transferred to, NULL for - * highmem page */ - unsigned long page_link; - unsigned int offset;/* for highmem, page offset */ - - dma_addr_t dma_address; - unsigned int length; -}; +#include #define ISA_DMA_THRESHOLD (0x1fffffff) diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index b5da298ba61d..2e3737b92ffc 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -7,6 +7,7 @@ config M68K default y select HAVE_AOUT select HAVE_IDE + select GENERIC_ATOMIC64 config MMU bool diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index d2cc35d98532..b1577f741fa8 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -97,10 +97,6 @@ static void amiga_get_model(char *model); static void amiga_get_hardware_list(struct seq_file *m); /* amiga specific timer functions */ static unsigned long amiga_gettimeoffset(void); -static int a3000_hwclk(int, struct rtc_time *); -static int a2000_hwclk(int, struct rtc_time *); -static int amiga_set_clock_mmss(unsigned long); -static unsigned int amiga_get_ss(void); extern void amiga_mksound(unsigned int count, unsigned int ticks); static void amiga_reset(void); extern void 
amiga_init_sound(void); @@ -138,10 +134,6 @@ static struct { } }; -static struct resource rtc_resource = { - .start = 0x00dc0000, .end = 0x00dcffff -}; - static struct resource ram_resource[NUM_MEMINFO]; @@ -387,15 +379,6 @@ void __init config_amiga(void) mach_get_model = amiga_get_model; mach_get_hardware_list = amiga_get_hardware_list; mach_gettimeoffset = amiga_gettimeoffset; - if (AMIGAHW_PRESENT(A3000_CLK)) { - mach_hwclk = a3000_hwclk; - rtc_resource.name = "A3000 RTC"; - request_resource(&iomem_resource, &rtc_resource); - } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ { - mach_hwclk = a2000_hwclk; - rtc_resource.name = "A2000 RTC"; - request_resource(&iomem_resource, &rtc_resource); - } /* * default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI @@ -404,8 +387,6 @@ void __init config_amiga(void) */ mach_max_dma_address = 0xffffffff; - mach_set_clock_mmss = amiga_set_clock_mmss; - mach_get_ss = amiga_get_ss; mach_reset = amiga_reset; #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE) mach_beep = amiga_mksound; @@ -530,161 +511,6 @@ static unsigned long amiga_gettimeoffset(void) return ticks + offset; } -static int a3000_hwclk(int op, struct rtc_time *t) -{ - tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD; - - if (!op) { /* read */ - t->tm_sec = tod_3000.second1 * 10 + tod_3000.second2; - t->tm_min = tod_3000.minute1 * 10 + tod_3000.minute2; - t->tm_hour = tod_3000.hour1 * 10 + tod_3000.hour2; - t->tm_mday = tod_3000.day1 * 10 + tod_3000.day2; - t->tm_wday = tod_3000.weekday; - t->tm_mon = tod_3000.month1 * 10 + tod_3000.month2 - 1; - t->tm_year = tod_3000.year1 * 10 + tod_3000.year2; - if (t->tm_year <= 69) - t->tm_year += 100; - } else { - tod_3000.second1 = t->tm_sec / 10; - tod_3000.second2 = t->tm_sec % 10; - tod_3000.minute1 = t->tm_min / 10; - tod_3000.minute2 = t->tm_min % 10; - tod_3000.hour1 = t->tm_hour / 10; - tod_3000.hour2 = t->tm_hour % 10; - tod_3000.day1 = t->tm_mday / 10; - tod_3000.day2 = t->tm_mday % 10; - if (t->tm_wday != -1) - tod_3000.weekday = t->tm_wday; - tod_3000.month1 = (t->tm_mon + 1) / 10; - tod_3000.month2 = (t->tm_mon + 1) % 10; - if (t->tm_year >= 100) - t->tm_year -= 100; - tod_3000.year1 = t->tm_year / 10; - tod_3000.year2 = t->tm_year % 10; - } - - tod_3000.cntrl1 = TOD3000_CNTRL1_FREE; - - return 0; -} - -static int a2000_hwclk(int op, struct rtc_time *t) -{ - int cnt = 5; - - tod_2000.cntrl1 = TOD2000_CNTRL1_HOLD; - - while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) { - tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; - udelay(70); - tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; - --cnt; - } - - if (!cnt) - printk(KERN_INFO "hwclk: timed out waiting for RTC (0x%x)\n", - tod_2000.cntrl1); - - if (!op) { /* read */ - t->tm_sec = tod_2000.second1 * 10 + tod_2000.second2; - t->tm_min = tod_2000.minute1 * 10 + tod_2000.minute2; - t->tm_hour = (tod_2000.hour1 & 3) * 10 + tod_2000.hour2; - t->tm_mday = tod_2000.day1 * 10 + tod_2000.day2; - t->tm_wday = tod_2000.weekday; - t->tm_mon = tod_2000.month1 * 10 + tod_2000.month2 - 1; - t->tm_year = tod_2000.year1 * 10 + tod_2000.year2; - if (t->tm_year <= 69) - t->tm_year += 100; - - if (!(tod_2000.cntrl3 & TOD2000_CNTRL3_24HMODE)) { - if (!(tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour == 12) - t->tm_hour = 0; - else if ((tod_2000.hour1 & TOD2000_HOUR1_PM) && t->tm_hour != 12) - t->tm_hour += 12; - } - } else { - tod_2000.second1 = t->tm_sec / 10; - tod_2000.second2 = t->tm_sec % 10; - tod_2000.minute1 = t->tm_min / 10; - tod_2000.minute2 = t->tm_min % 10; - if (tod_2000.cntrl3 & 
TOD2000_CNTRL3_24HMODE) - tod_2000.hour1 = t->tm_hour / 10; - else if (t->tm_hour >= 12) - tod_2000.hour1 = TOD2000_HOUR1_PM + - (t->tm_hour - 12) / 10; - else - tod_2000.hour1 = t->tm_hour / 10; - tod_2000.hour2 = t->tm_hour % 10; - tod_2000.day1 = t->tm_mday / 10; - tod_2000.day2 = t->tm_mday % 10; - if (t->tm_wday != -1) - tod_2000.weekday = t->tm_wday; - tod_2000.month1 = (t->tm_mon + 1) / 10; - tod_2000.month2 = (t->tm_mon + 1) % 10; - if (t->tm_year >= 100) - t->tm_year -= 100; - tod_2000.year1 = t->tm_year / 10; - tod_2000.year2 = t->tm_year % 10; - } - - tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; - - return 0; -} - -static int amiga_set_clock_mmss(unsigned long nowtime) -{ - short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60; - - if (AMIGAHW_PRESENT(A3000_CLK)) { - tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD; - - tod_3000.second1 = real_seconds / 10; - tod_3000.second2 = real_seconds % 10; - tod_3000.minute1 = real_minutes / 10; - tod_3000.minute2 = real_minutes % 10; - - tod_3000.cntrl1 = TOD3000_CNTRL1_FREE; - } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ { - int cnt = 5; - - tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; - - while ((tod_2000.cntrl1 & TOD2000_CNTRL1_BUSY) && cnt) { - tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; - udelay(70); - tod_2000.cntrl1 |= TOD2000_CNTRL1_HOLD; - --cnt; - } - - if (!cnt) - printk(KERN_INFO "set_clock_mmss: timed out waiting for RTC (0x%x)\n", tod_2000.cntrl1); - - tod_2000.second1 = real_seconds / 10; - tod_2000.second2 = real_seconds % 10; - tod_2000.minute1 = real_minutes / 10; - tod_2000.minute2 = real_minutes % 10; - - tod_2000.cntrl1 &= ~TOD2000_CNTRL1_HOLD; - } - - return 0; -} - -static unsigned int amiga_get_ss(void) -{ - unsigned int s; - - if (AMIGAHW_PRESENT(A3000_CLK)) { - tod_3000.cntrl1 = TOD3000_CNTRL1_HOLD; - s = tod_3000.second1 * 10 + tod_3000.second2; - tod_3000.cntrl1 = TOD3000_CNTRL1_FREE; - } else /* if (AMIGAHW_PRESENT(A2000_CLK)) */ { - s = tod_2000.second1 * 10 + tod_2000.second2; - } - return s; -} - static NORET_TYPE void amiga_reset(void) ATTRIB_NORET; diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c index 38f18bf14737..7fd8b41723ea 100644 --- a/arch/m68k/amiga/platform.c +++ b/arch/m68k/amiga/platform.c @@ -11,6 +11,7 @@ #include #include +#include #ifdef CONFIG_ZORRO @@ -55,11 +56,77 @@ static int __init amiga_init_bus(void) subsys_initcall(amiga_init_bus); -#endif /* CONFIG_ZORRO */ + +static int z_dev_present(zorro_id id) +{ + unsigned int i; + + for (i = 0; i < zorro_num_autocon; i++) + if (zorro_autocon[i].rom.er_Manufacturer == ZORRO_MANUF(id) && + zorro_autocon[i].rom.er_Product == ZORRO_PROD(id)) + return 1; + + return 0; +} + +#else /* !CONFIG_ZORRO */ + +static inline int z_dev_present(zorro_id id) { return 0; } + +#endif /* !CONFIG_ZORRO */ + + +static const struct resource a3000_scsi_resource __initconst = { + .start = 0xdd0000, + .end = 0xdd00ff, + .flags = IORESOURCE_MEM, +}; + + +static const struct resource a4000t_scsi_resource __initconst = { + .start = 0xdd0000, + .end = 0xdd0fff, + .flags = IORESOURCE_MEM, +}; + + +static const struct resource a1200_ide_resource __initconst = { + .start = 0xda0000, + .end = 0xda1fff, + .flags = IORESOURCE_MEM, +}; + +static const struct gayle_ide_platform_data a1200_ide_pdata __initconst = { + .base = 0xda0000, + .irqport = 0xda9000, + .explicit_ack = 1, +}; + + +static const struct resource a4000_ide_resource __initconst = { + .start = 0xdd2000, + .end = 0xdd3fff, + .flags = IORESOURCE_MEM, +}; + +static const struct gayle_ide_platform_data 
a4000_ide_pdata __initconst = { + .base = 0xdd2020, + .irqport = 0xdd3020, + .explicit_ack = 0, +}; + + +static const struct resource amiga_rtc_resource __initconst = { + .start = 0x00dc0000, + .end = 0x00dcffff, + .flags = IORESOURCE_MEM, +}; static int __init amiga_init_devices(void) { + struct platform_device *pdev; + if (!MACH_IS_AMIGA) return -ENODEV; @@ -77,6 +144,53 @@ static int __init amiga_init_devices(void) if (AMIGAHW_PRESENT(AMI_FLOPPY)) platform_device_register_simple("amiga-floppy", -1, NULL, 0); + if (AMIGAHW_PRESENT(A3000_SCSI)) + platform_device_register_simple("amiga-a3000-scsi", -1, + &a3000_scsi_resource, 1); + + if (AMIGAHW_PRESENT(A4000_SCSI)) + platform_device_register_simple("amiga-a4000t-scsi", -1, + &a4000t_scsi_resource, 1); + + if (AMIGAHW_PRESENT(A1200_IDE) || + z_dev_present(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE)) { + pdev = platform_device_register_simple("amiga-gayle-ide", -1, + &a1200_ide_resource, 1); + platform_device_add_data(pdev, &a1200_ide_pdata, + sizeof(a1200_ide_pdata)); + } + + if (AMIGAHW_PRESENT(A4000_IDE)) { + pdev = platform_device_register_simple("amiga-gayle-ide", -1, + &a4000_ide_resource, 1); + platform_device_add_data(pdev, &a4000_ide_pdata, + sizeof(a4000_ide_pdata)); + } + + + /* other I/O hardware */ + if (AMIGAHW_PRESENT(AMI_KEYBOARD)) + platform_device_register_simple("amiga-keyboard", -1, NULL, 0); + + if (AMIGAHW_PRESENT(AMI_MOUSE)) + platform_device_register_simple("amiga-mouse", -1, NULL, 0); + + if (AMIGAHW_PRESENT(AMI_SERIAL)) + platform_device_register_simple("amiga-serial", -1, NULL, 0); + + if (AMIGAHW_PRESENT(AMI_PARALLEL)) + platform_device_register_simple("amiga-parallel", -1, NULL, 0); + + + /* real time clocks */ + if (AMIGAHW_PRESENT(A2000_CLK)) + platform_device_register_simple("rtc-msm6242", -1, + &amiga_rtc_resource, 1); + + if (AMIGAHW_PRESENT(A3000_CLK)) + platform_device_register_simple("rtc-rp5c01", -1, + &amiga_rtc_resource, 1); + return 0; } diff --git a/arch/m68k/include/asm/amigayle.h b/arch/m68k/include/asm/amigayle.h index bb5a6aa329f3..a01453d9c231 100644 --- a/arch/m68k/include/asm/amigayle.h +++ b/arch/m68k/include/asm/amigayle.h @@ -104,4 +104,10 @@ struct GAYLE { #define GAYLE_CFG_250NS 0x00 #define GAYLE_CFG_720NS 0x0c +struct gayle_ide_platform_data { + unsigned long base; + unsigned long irqport; + int explicit_ack; /* A1200 IDE needs explicit ack */ +}; + #endif /* asm-m68k/amigayle.h */ diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 8d29145ebb27..eab36dcacf6c 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h @@ -3,3 +3,5 @@ #else #include "atomic_mm.h" #endif + +#include diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h index fed3fd30de7e..ecafbe1718c3 100644 --- a/arch/m68k/include/asm/cache.h +++ b/arch/m68k/include/asm/cache.h @@ -8,4 +8,6 @@ #define L1_CACHE_SHIFT 4 #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES + #endif diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h index e27ad902b1cf..175da06c6b95 100644 --- a/arch/m68k/include/asm/scatterlist.h +++ b/arch/m68k/include/asm/scatterlist.h @@ -1,23 +1,9 @@ #ifndef _M68K_SCATTERLIST_H #define _M68K_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; - - dma_addr_t dma_address; /* A place to hang host-specific addresses at. 
*/ -}; +#include /* This is bogus and should go away. */ #define ISA_DMA_THRESHOLD (0x00ffffff) -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - #endif /* !(_M68K_SCATTERLIST_H) */ diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index 9f1784f586b9..a91b2713451d 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -57,7 +57,7 @@ SECTIONS { .romvec : { __rom_start = . ; _romvec = .; - *(.data.initvect) + *(.data..initvect) } > romvec #endif @@ -68,7 +68,7 @@ SECTIONS { TEXT_TEXT SCHED_TEXT LOCK_TEXT - *(.text.lock) + *(.text..lock) . = ALIGN(16); /* Exception table */ __start___ex_table = .; diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S index 2ef06242398b..8eb94fb6b971 100644 --- a/arch/m68knommu/platform/68360/head-ram.S +++ b/arch/m68knommu/platform/68360/head-ram.S @@ -280,7 +280,7 @@ _dprbase: * and then overwritten as needed. */ -.section ".data.initvect","awx" +.section ".data..initvect","awx" .long RAMEND /* Reset: Initial Stack Pointer - 0. */ .long _start /* Reset: Initial Program Counter - 1. */ .long buserr /* Bus Error - 2. */ diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S index 62ecf4144b3b..97510e55b802 100644 --- a/arch/m68knommu/platform/68360/head-rom.S +++ b/arch/m68knommu/platform/68360/head-rom.S @@ -291,7 +291,7 @@ _dprbase: * and then overwritten as needed. */ -.section ".data.initvect","awx" +.section ".data..initvect","awx" .long RAMEND /* Reset: Initial Stack Pointer - 0. */ .long _start /* Reset: Initial Program Counter - 1. */ .long buserr /* Bus Error - 2. */ diff --git a/arch/microblaze/include/asm/scatterlist.h b/arch/microblaze/include/asm/scatterlist.h index 35d786fe93ae..dc4a8900cc80 100644 --- a/arch/microblaze/include/asm/scatterlist.h +++ b/arch/microblaze/include/asm/scatterlist.h @@ -1 +1,3 @@ #include + +#define ISA_DMA_THRESHOLD (~0UL) diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h index 83d69fe17c9f..9af65e79be36 100644 --- a/arch/mips/include/asm/scatterlist.h +++ b/arch/mips/include/asm/scatterlist.h @@ -1,27 +1,7 @@ #ifndef __ASM_SCATTERLIST_H #define __ASM_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. 
- */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include #define ISA_DMA_THRESHOLD (0x00ffffffUL) diff --git a/arch/mips/lasat/image/head.S b/arch/mips/lasat/image/head.S index efb95f2609c2..e0ecda92c40a 100644 --- a/arch/mips/lasat/image/head.S +++ b/arch/mips/lasat/image/head.S @@ -1,7 +1,7 @@ #include .text - .section .text.start, "ax" + .section .text..start, "ax" .set noreorder .set mips3 diff --git a/arch/mips/lasat/image/romscript.normal b/arch/mips/lasat/image/romscript.normal index 988f8ad189cb..0864c963e188 100644 --- a/arch/mips/lasat/image/romscript.normal +++ b/arch/mips/lasat/image/romscript.normal @@ -4,7 +4,7 @@ SECTIONS { .text : { - *(.text.start) + *(.text..start) } /* Data in ROM */ diff --git a/arch/mn10300/include/asm/scatterlist.h b/arch/mn10300/include/asm/scatterlist.h index 67535901b9ff..7bd00b9e030d 100644 --- a/arch/mn10300/include/asm/scatterlist.h +++ b/arch/mn10300/include/asm/scatterlist.h @@ -11,45 +11,8 @@ #ifndef _ASM_SCATTERLIST_H #define _ASM_SCATTERLIST_H -#include - -/* - * Drivers must set either ->address or (preferred) page and ->offset - * to indicate where data must be transferred to/from. - * - * Using page is recommended since it handles highmem data as well as - * low mem. ->address is restricted to data which has a virtual mapping, and - * it will go away in the future. Updating to page can be automated very - * easily -- something like - * - * sg->address = some_ptr; - * - * can be rewritten as - * - * sg_set_page(virt_to_page(some_ptr)); - * sg->offset = (unsigned long) some_ptr & ~PAGE_MASK; - * - * and that's it. There's no excuse for not highmem enabling YOUR driver. /jens - */ -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; /* for highmem, page offset */ - dma_addr_t dma_address; - unsigned int length; -}; +#include #define ISA_DMA_THRESHOLD (0x00ffffff) -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - #endif /* _ASM_SCATTERLIST_H */ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 9c4da3d63bfb..05a366a5c4d5 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -98,6 +98,9 @@ config STACKTRACE_SUPPORT config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config ISA_DMA_API bool diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index 32c2cca74345..45effe6978fa 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -28,7 +28,7 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) void parisc_cache_init(void); /* initializes cache-flushing */ void disable_sr_hashing_asm(int); /* low level support for above */ diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 477277739da5..4556d820128a 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -2,6 +2,7 @@ #define _PARISC_CACHEFLUSH_H #include +#include /* The usual comment is "Caches aren't brain-dead on the ". * Unfortunately, that doesn't apply to PA-RISC. 
*/ @@ -125,11 +126,20 @@ static inline void *kmap(struct page *page) #define kunmap(page) kunmap_parisc(page_address(page)) -#define kmap_atomic(page, idx) page_address(page) +static inline void *kmap_atomic(struct page *page, enum km_type idx) +{ + pagefault_disable(); + return page_address(page); +} -#define kunmap_atomic(addr, idx) kunmap_parisc(addr) +static inline void kunmap_atomic(void *addr, enum km_type idx) +{ + kunmap_parisc(addr); + pagefault_enable(); +} -#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn)) +#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) +#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) #endif diff --git a/arch/parisc/include/asm/scatterlist.h b/arch/parisc/include/asm/scatterlist.h index 62269b31ebf4..2c3b79b54b28 100644 --- a/arch/parisc/include/asm/scatterlist.h +++ b/arch/parisc/include/asm/scatterlist.h @@ -3,25 +3,9 @@ #include #include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - - unsigned int length; - - /* an IOVA can be 64-bits on some PA-Risc platforms. */ - dma_addr_t iova; /* I/O Virtual Address */ - __u32 iova_length; /* bytes mapped */ -}; - -#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg)) -#define sg_dma_address(sg) ((sg)->iova) -#define sg_dma_len(sg) ((sg)->iova_length) +#include #define ISA_DMA_THRESHOLD (~0UL) +#define sg_virt_addr(sg) ((unsigned long)sg_virt(sg)) #endif /* _ASM_PARISC_SCATTERLIST_H */ diff --git a/arch/parisc/include/asm/system.h b/arch/parisc/include/asm/system.h index 4653c77bf9d1..2ab4af58ecb9 100644 --- a/arch/parisc/include/asm/system.h +++ b/arch/parisc/include/asm/system.h @@ -174,7 +174,7 @@ static inline void set_eiem(unsigned long val) }) #ifdef CONFIG_SMP -# define __lock_aligned __attribute__((__section__(".data.lock_aligned"))) +# define __lock_aligned __attribute__((__section__(".data..lock_aligned"))) #endif #define arch_align_stack(x) (x) diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index ec787b411e9a..dcd55103a4bb 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -45,8 +45,12 @@ #else #define FRAME_SIZE 64 #endif +#define FRAME_ALIGN 64 -#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y))) +/* Add FRAME_SIZE to the size x and align it to y. All definitions + * that use align_frame will include space for a frame. + */ +#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y))) int main(void) { @@ -146,7 +150,8 @@ int main(void) DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior)); BLANK(); DEFINE(TASK_SZ, sizeof(struct task_struct)); - DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64)); + /* TASK_SZ_ALGN includes space for a stack frame. */ + DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN)); BLANK(); DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0])); DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1])); @@ -233,7 +238,8 @@ int main(void) DEFINE(PT_ISR, offsetof(struct pt_regs, isr)); DEFINE(PT_IOR, offsetof(struct pt_regs, ior)); DEFINE(PT_SIZE, sizeof(struct pt_regs)); - DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64)); + /* PT_SZ_ALGN includes space for a stack frame. 
*/ + DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN)); BLANK(); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); @@ -242,7 +248,8 @@ int main(void) DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); DEFINE(THREAD_SZ, sizeof(struct thread_info)); - DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64)); + /* THREAD_SZ_ALGN includes space for a stack frame. */ + DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN)); BLANK(); DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base)); DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride)); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 3a44f7f704fa..6337adef30f6 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -364,32 +364,6 @@ .align 32 .endm - /* The following are simple 32 vs 64 bit instruction - * abstractions for the macros */ - .macro EXTR reg1,start,length,reg2 -#ifdef CONFIG_64BIT - extrd,u \reg1,32+(\start),\length,\reg2 -#else - extrw,u \reg1,\start,\length,\reg2 -#endif - .endm - - .macro DEP reg1,start,length,reg2 -#ifdef CONFIG_64BIT - depd \reg1,32+(\start),\length,\reg2 -#else - depw \reg1,\start,\length,\reg2 -#endif - .endm - - .macro DEPI val,start,length,reg -#ifdef CONFIG_64BIT - depdi \val,32+(\start),\length,\reg -#else - depwi \val,\start,\length,\reg -#endif - .endm - /* In LP64, the space contains part of the upper 32 bits of the * fault. We have to extract this and place it in the va, * zeroing the corresponding bits in the space register */ @@ -442,19 +416,19 @@ */ .macro L2_ptep pmd,pte,index,va,fault #if PT_NLEVELS == 3 - EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index + extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index #else - EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index + extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index #endif - DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ + dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ copy %r0,\pte ldw,s \index(\pmd),\pmd bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault - DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ + dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ copy \pmd,%r9 SHLREG %r9,PxD_VALUE_SHIFT,\pmd - EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index - DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ + extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index + dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd LDREG %r0(\pmd),\pte /* pmd is now pte */ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault @@ -605,7 +579,7 @@ depdi 0,31,32,\tmp #endif copy \va,\tmp1 - DEPI 0,31,23,\tmp1 + depi 0,31,23,\tmp1 cmpb,COND(<>),n \tmp,\tmp1,\fault ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot depd,z \prot,8,7,\prot @@ -997,13 +971,6 @@ intr_restore: rfi nop - nop - nop - nop - nop - nop - nop - nop #ifndef CONFIG_PREEMPT # define intr_do_preempt intr_restore @@ -2076,9 +2043,10 @@ syscall_restore: LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ /* NOTE: We use rsm/ssm pair to make this operation atomic */ + LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */ rsm PSW_SM_I, %r0 - LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */ - mfsp %sr3,%r1 /* Get users space id */ + copy %r1,%r30 /* Restore user sp */ + mfsp %sr3,%r1 /* Get user space id */ mtsp %r1,%sr7 /* Restore sr7 */ ssm PSW_SM_I, %r0 diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 0e3d9f9b9e33..4dbdf0ed6fa0 
100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -345,7 +345,7 @@ smp_slave_stext: ENDPROC(stext) #ifndef CONFIG_64BIT - .section .data.read_mostly + .section .data..read_mostly .align 4 .export $global$,data diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index d020eae6525c..4a91e433416f 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -53,11 +53,11 @@ union thread_union init_thread_union __init_task_data * guarantee that global objects will be laid out in memory in the same order * as the order of declaration, so put these in different sections and use * the linker script to order them. */ -pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data.vm0.pmd"), aligned(PAGE_SIZE))); +pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); #endif -pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data.vm0.pgd"), aligned(PAGE_SIZE))); -pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data.vm0.pte"), aligned(PAGE_SIZE))); +pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); +pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); /* * Initial task structure. diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index f5f96021caa0..68e75ce838d6 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -47,18 +47,17 @@ ENTRY(linux_gateway_page) KILL_INSN .endr - /* ADDRESS 0xb0 to 0xb4, lws uses 1 insns for entry */ + /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ /* Light-weight-syscall entry must always be located at 0xb0 */ /* WARNING: Keep this number updated with table size changes */ #define __NR_lws_entries (2) lws_entry: - /* Unconditional branch to lws_start, located on the - same gateway page */ - b,n lws_start + gate lws_start, %r0 /* increase privilege */ + depi 3, 31, 2, %r31 /* Ensure we return into user mode. */ - /* Fill from 0xb4 to 0xe0 */ - .rept 11 + /* Fill from 0xb8 to 0xe0 */ + .rept 10 KILL_INSN .endr @@ -423,9 +422,6 @@ tracesys_sigexit: *********************************************************/ lws_start: - /* Gate and ensure we return to userspace */ - gate .+8, %r0 - depi 3, 31, 2, %r31 /* Ensure we return to userspace */ #ifdef CONFIG_64BIT /* FIXME: If we are a 64-bit kernel just @@ -442,7 +438,7 @@ lws_start: #endif /* Is the lws entry number valid? */ - comiclr,>>= __NR_lws_entries, %r20, %r0 + comiclr,>> __NR_lws_entries, %r20, %r0 b,n lws_exit_nosys /* WARNING: Trashing sr2 and sr3 */ @@ -473,7 +469,7 @@ lws_exit: /* now reset the lowest bit of sp if it was set */ xor %r30,%r1,%r30 #endif - be,n 0(%sr3, %r31) + be,n 0(%sr7, %r31) @@ -529,7 +525,6 @@ lws_compare_and_swap32: #endif lws_compare_and_swap: -#ifdef CONFIG_SMP /* Load start of lock table */ ldil L%lws_lock_start, %r20 ldo R%lws_lock_start(%r20), %r28 @@ -572,8 +567,6 @@ cas_wouldblock: ldo 2(%r0), %r28 /* 2nd case */ b lws_exit /* Contended... 
*/ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ -#endif -/* CONFIG_SMP */ /* prev = *addr; @@ -601,13 +594,11 @@ cas_action: 1: ldw 0(%sr3,%r26), %r28 sub,<> %r28, %r25, %r0 2: stw %r24, 0(%sr3,%r26) -#ifdef CONFIG_SMP /* Free lock */ stw %r20, 0(%sr2,%r20) -# if ENABLE_LWS_DEBUG +#if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) -# endif #endif /* Return to userspace, set no error */ b lws_exit @@ -615,12 +606,10 @@ cas_action: 3: /* Error occured on load or store */ -#ifdef CONFIG_SMP /* Free lock */ stw %r20, 0(%sr2,%r20) -# if ENABLE_LWS_DEBUG +#if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) -# endif #endif b lws_exit ldo -EFAULT(%r0),%r21 /* set errno */ @@ -672,7 +661,6 @@ ENTRY(sys_call_table64) END(sys_call_table64) #endif -#ifdef CONFIG_SMP /* All light-weight-syscall atomic operations will use this set of locks @@ -694,8 +682,6 @@ ENTRY(lws_lock_start) .endr END(lws_lock_start) .previous -#endif -/* CONFIG_SMP for lws_lock_start */ .end diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 9dab4a4e09f7..d64a6bbec2aa 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -94,8 +94,8 @@ SECTIONS /* PA-RISC locks requires 16-byte alignment */ . = ALIGN(16); - .data.lock_aligned : { - *(.data.lock_aligned) + .data..lock_aligned : { + *(.data..lock_aligned) } /* End of data section */ @@ -105,10 +105,10 @@ SECTIONS __bss_start = .; /* page table entries need to be PAGE_SIZE aligned */ . = ALIGN(PAGE_SIZE); - .data.vmpages : { - *(.data.vm0.pmd) - *(.data.vm0.pgd) - *(.data.vm0.pte) + .data..vmpages : { + *(.data..vm0.pmd) + *(.data..vm0.pgd) + *(.data..vm0.pte) } .bss : { *(.bss) diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c index 3ca1c6149218..27a7492ddb0d 100644 --- a/arch/parisc/math-emu/decode_exc.c +++ b/arch/parisc/math-emu/decode_exc.c @@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[]) return SIGNALCODE(SIGFPE, FPE_FLTINV); case DIVISIONBYZEROEXCEPTION: update_trap_counts(Fpu_register, aflags, bflags, trap_counts); + Clear_excp_register(exception_index); return SIGNALCODE(SIGFPE, FPE_FLTDIV); case INEXACTEXCEPTION: update_trap_counts(Fpu_register, aflags, bflags, trap_counts); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index c6afbfc95770..18162ce4261e 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -264,8 +264,7 @@ no_context: out_of_memory: up_read(&mm->mmap_sem); - printk(KERN_CRIT "VM: killing process %s\n", current->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); } diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index c4c4549c22bb..66a315e06dce 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -663,6 +663,9 @@ config ZONE_DMA config NEED_DMA_MAP_STATE def_bool (PPC64 || NOT_COHERENT_CACHE) +config NEED_SG_DMA_LENGTH + def_bool y + config GENERIC_ISA_DMA bool depends on PPC64 || POWER4 || 6xx && !CPM2 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 1a54a3b3a3fa..42dcd3f4ad7b 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -112,6 +112,11 @@ KBUILD_CFLAGS += $(call cc-option,-mspe=no) # kernel considerably. KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time) +# FIXME: the module load should be taught about the additional relocs +# generated by this. 
+# revert to pre-gcc-4.4 behaviour of .eh_frame +KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm) + # Never use string load/store instructions as they are # often slow when they are implemented at all KBUILD_CFLAGS += -mno-string diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index 725634fc18c6..4b509411ad8a 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -42,7 +42,7 @@ extern struct ppc64_caches ppc64_caches; #endif /* __powerpc64__ && ! __ASSEMBLY__ */ #if !defined(__ASSEMBLY__) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index bfc4e027e2ad..358ff14ea25e 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -162,14 +162,6 @@ do { \ #endif /* !CONFIG_HUGETLB_PAGE */ -#ifdef MODULE -#define __page_aligned __attribute__((__aligned__(PAGE_SIZE))) -#else -#define __page_aligned \ - __attribute__((__aligned__(PAGE_SIZE), \ - __section__(".data.page_aligned"))) -#endif - #define VM_DATA_DEFAULT_FLAGS \ (test_thread_flag(TIF_32BIT) ? \ VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h index 912bf597870f..34cc78fd0ef4 100644 --- a/arch/powerpc/include/asm/scatterlist.h +++ b/arch/powerpc/include/asm/scatterlist.h @@ -9,38 +9,12 @@ * 2 of the License, or (at your option) any later version. */ -#ifdef __KERNEL__ -#include #include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; - - /* For TCE or SWIOTLB support */ - dma_addr_t dma_address; - u32 dma_length; -}; - -/* - * These macros should be used after a dma_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. 
- */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->dma_length) +#include #ifdef __powerpc64__ #define ISA_DMA_THRESHOLD (~0UL) #endif - #define ARCH_HAS_SG_CHAIN -#endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SCATTERLIST_H */ diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 4ff4da2c238b..e7fe218b8697 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -39,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = { .dma_supported = swiotlb_dma_supported, .map_page = swiotlb_map_page, .unmap_page = swiotlb_unmap_page, - .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, - .sync_single_range_for_device = swiotlb_sync_single_range_for_device, + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, + .sync_single_for_device = swiotlb_sync_single_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 6c1df5757cd6..8d1de6f31d5a 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev, __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); } -static inline void dma_direct_sync_single_range(struct device *dev, - dma_addr_t dma_handle, unsigned long offset, size_t size, - enum dma_data_direction direction) +static inline void dma_direct_sync_single(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction direction) { - __dma_sync(bus_to_virt(dma_handle+offset), size, direction); + __dma_sync(bus_to_virt(dma_handle), size, direction); } #endif @@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = { .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, #ifdef CONFIG_NOT_COHERENT_CACHE - .sync_single_range_for_cpu = dma_direct_sync_single_range, - .sync_single_range_for_device = dma_direct_sync_single_range, + .sync_single_for_cpu = dma_direct_sync_single, + .sync_single_for_device = dma_direct_sync_single, .sync_sg_for_cpu = dma_direct_sync_sg, .sync_sg_for_device = dma_direct_sync_sg, #endif diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index dcd01c82e701..8a0deefac08d 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -223,19 +223,17 @@ SECTIONS #endif /* The initial task and kernel stack */ - .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { - INIT_TASK_DATA(THREAD_SIZE) - } + INIT_TASK_DATA_SECTION(THREAD_SIZE) - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) { + .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) { PAGE_ALIGNED_DATA(PAGE_SIZE) } - .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) { + .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) { CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) } - .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { + .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) { READ_MOSTLY_DATA(L1_CACHE_BYTES) } diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 5c2808252516..1a40da92154c 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1849,8 +1849,7 @@ out: return ret; } -static int spufs_mfc_fsync(struct file *file, struct dentry *dentry, - int datasync) 
+static int spufs_mfc_fsync(struct file *file, int datasync) { return spufs_mfc_flush(file, NULL); } diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index fc1b1c42b1dc..e5e5f823d687 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -251,7 +251,7 @@ const struct file_operations spufs_context_fops = { .llseek = dcache_dir_lseek, .read = generic_read_dir, .readdir = dcache_readdir, - .fsync = simple_sync_file, + .fsync = noop_fsync, }; EXPORT_SYMBOL_GPL(spufs_context_fops); diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index 1fefae76e295..e19ff021e711 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -102,7 +102,7 @@ static const struct file_operations hcall_inst_seq_fops = { #define CPU_NAME_BUF_SIZE 32 -static void probe_hcall_entry(unsigned long opcode, unsigned long *args) +static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args) { struct hcall_stats *h; @@ -114,7 +114,7 @@ static void probe_hcall_entry(unsigned long opcode, unsigned long *args) h->purr_start = mfspr(SPRN_PURR); } -static void probe_hcall_exit(unsigned long opcode, unsigned long retval, +static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long retval, unsigned long *retbuf) { struct hcall_stats *h; @@ -140,11 +140,11 @@ static int __init hcall_inst_init(void) if (!firmware_has_feature(FW_FEATURE_LPAR)) return 0; - if (register_trace_hcall_entry(probe_hcall_entry)) + if (register_trace_hcall_entry(probe_hcall_entry, NULL)) return -EINVAL; - if (register_trace_hcall_exit(probe_hcall_exit)) { - unregister_trace_hcall_entry(probe_hcall_entry); + if (register_trace_hcall_exit(probe_hcall_exit, NULL)) { + unregister_trace_hcall_entry(probe_hcall_entry, NULL); return -EINVAL; } diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 6a1fde0d22b0..cd37e49e7034 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -1,6 +1,15 @@ /* * Freescale MPC85xx/MPC86xx RapidIO support * + * Copyright 2009 Sysgo AG + * Thomas Moll + * - fixed maintenance access routines, check for aligned access + * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine + * - Added Port-Write message handling + * - Added Machine Check exception handling + * * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. 
* Zhang Wei * @@ -24,19 +33,30 @@ #include #include #include +#include #include +#include +#include + +#undef DEBUG_PW /* Port-Write debugging */ /* RapidIO definition irq, which read from OF-tree */ #define IRQ_RIO_BELL(m) (((struct rio_priv *)(m->priv))->bellirq) #define IRQ_RIO_TX(m) (((struct rio_priv *)(m->priv))->txirq) #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) +#define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) #define RIO_ATMU_REGS_OFFSET 0x10c00 #define RIO_P_MSG_REGS_OFFSET 0x11000 #define RIO_S_MSG_REGS_OFFSET 0x13000 #define RIO_ESCSR 0x158 #define RIO_CCSR 0x15c +#define RIO_LTLEDCSR 0x0608 +#define RIO_LTLEDCSR_IER 0x80000000 +#define RIO_LTLEDCSR_PRT 0x01000000 +#define RIO_LTLEECSR 0x060c +#define RIO_EPWISR 0x10010 #define RIO_ISR_AACR 0x10120 #define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ #define RIO_MAINT_WIN_SIZE 0x400000 @@ -55,6 +75,18 @@ #define RIO_MSG_ISR_QFI 0x00000010 #define RIO_MSG_ISR_DIQI 0x00000001 +#define RIO_IPWMR_SEN 0x00100000 +#define RIO_IPWMR_QFIE 0x00000100 +#define RIO_IPWMR_EIE 0x00000020 +#define RIO_IPWMR_CQ 0x00000002 +#define RIO_IPWMR_PWE 0x00000001 + +#define RIO_IPWSR_QF 0x00100000 +#define RIO_IPWSR_TE 0x00000080 +#define RIO_IPWSR_QFI 0x00000010 +#define RIO_IPWSR_PWD 0x00000008 +#define RIO_IPWSR_PWB 0x00000004 + #define RIO_MSG_DESC_SIZE 32 #define RIO_MSG_BUFFER_SIZE 4096 #define RIO_MIN_TX_RING_SIZE 2 @@ -121,7 +153,7 @@ struct rio_msg_regs { u32 pad10[26]; u32 pwmr; u32 pwsr; - u32 pad11; + u32 epwqbar; u32 pwqbar; }; @@ -160,6 +192,14 @@ struct rio_msg_rx_ring { void *dev_id; }; +struct rio_port_write_msg { + void *virt; + dma_addr_t phys; + u32 msg_count; + u32 err_count; + u32 discard_count; +}; + struct rio_priv { struct device *dev; void __iomem *regs_win; @@ -172,11 +212,64 @@ struct rio_priv { struct rio_dbell_ring dbell_ring; struct rio_msg_tx_ring msg_tx_ring; struct rio_msg_rx_ring msg_rx_ring; + struct rio_port_write_msg port_write_msg; int bellirq; int txirq; int rxirq; + int pwirq; + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; }; +#define __fsl_read_rio_config(x, addr, err, op) \ + __asm__ __volatile__( \ + "1: "op" %1,0(%2)\n" \ + " eieio\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: li %1,-1\n" \ + " li %0,%3\n" \ + " b 2b\n" \ + ".section __ex_table,\"a\"\n" \ + " .align 2\n" \ + " .long 1b,3b\n" \ + ".text" \ + : "=r" (err), "=r" (x) \ + : "b" (addr), "i" (-EFAULT), "0" (err)) + +static void __iomem *rio_regs_win; + +static int (*saved_mcheck_exception)(struct pt_regs *regs); + +static int fsl_rio_mcheck_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *entry = NULL; + unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK); + + if (reason & MCSR_BUS_RBERR) { + reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); + if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { + /* Check if we are prepared to handle this fault */ + entry = search_exception_tables(regs->nip); + if (entry) { + pr_debug("RIO: %s - MC Exception handled\n", + __func__); + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), + 0); + regs->msr |= MSR_RI; + regs->nip = entry->fixup; + return 1; + } + } + } + + if (saved_mcheck_exception) + return saved_mcheck_exception(regs); + else + return cur_cpu_spec->machine_check(regs); +} + /** * fsl_rio_doorbell_send - Send a MPC85xx doorbell message * @mport: RapidIO master port info @@ -277,27 +370,44 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, { struct rio_priv *priv = mport->priv; u8 
*data; + u32 rval, err = 0; pr_debug ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n", index, destid, hopcount, offset, len); + + /* 16MB maintenance window possible */ + /* allow only aligned access to maintenance registers */ + if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) + return -EINVAL; + out_be32(&priv->maint_atmu_regs->rowtar, - (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); + (destid << 22) | (hopcount << 12) | (offset >> 12)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); - data = (u8 *) priv->maint_win + offset; + data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: - *val = in_8((u8 *) data); + __fsl_read_rio_config(rval, data, err, "lbz"); break; case 2: - *val = in_be16((u16 *) data); + __fsl_read_rio_config(rval, data, err, "lhz"); break; - default: - *val = in_be32((u32 *) data); + case 4: + __fsl_read_rio_config(rval, data, err, "lwz"); break; + default: + return -EINVAL; } - return 0; + if (err) { + pr_debug("RIO: cfg_read error %d for %x:%x:%x\n", + err, destid, hopcount, offset); + } + + *val = rval; + + return err; } /** @@ -322,10 +432,17 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, pr_debug ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", index, destid, hopcount, offset, len, val); + + /* 16MB maintenance windows possible */ + /* allow only aligned access to maintenance registers */ + if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) + return -EINVAL; + out_be32(&priv->maint_atmu_regs->rowtar, - (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9)); + (destid << 22) | (hopcount << 12) | (offset >> 12)); + out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); - data = (u8 *) priv->maint_win + offset; + data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1)); switch (len) { case 1: out_8((u8 *) data, val); @@ -333,9 +450,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, case 2: out_be16((u16 *) data, val); break; - default: + case 4: out_be32((u32 *) data, val); break; + default: + return -EINVAL; } return 0; @@ -930,6 +1049,223 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport) return rc; } +/** + * fsl_rio_port_write_handler - MPC85xx port write interrupt handler + * @irq: Linux interrupt number + * @dev_instance: Pointer to interrupt-specific data + * + * Handles port write interrupts. Parses a list of registered + * port write event handlers and executes a matching event handler. 
+ */
+static irqreturn_t
+fsl_rio_port_write_handler(int irq, void *dev_instance)
+{
+	u32 ipwmr, ipwsr;
+	struct rio_mport *port = (struct rio_mport *)dev_instance;
+	struct rio_priv *priv = port->priv;
+	u32 epwisr, tmp;
+
+	ipwmr = in_be32(&priv->msg_regs->pwmr);
+	ipwsr = in_be32(&priv->msg_regs->pwsr);
+
+	epwisr = in_be32(priv->regs_win + RIO_EPWISR);
+	if (epwisr & 0x80000000) {
+		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+		pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
+		out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+	}
+
+	if (!(epwisr & 0x00000001))
+		return IRQ_HANDLED;
+
+#ifdef DEBUG_PW
+	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
+	if (ipwsr & RIO_IPWSR_QF)
+		pr_debug(" QF");
+	if (ipwsr & RIO_IPWSR_TE)
+		pr_debug(" TE");
+	if (ipwsr & RIO_IPWSR_QFI)
+		pr_debug(" QFI");
+	if (ipwsr & RIO_IPWSR_PWD)
+		pr_debug(" PWD");
+	if (ipwsr & RIO_IPWSR_PWB)
+		pr_debug(" PWB");
+	pr_debug(" )\n");
+#endif
+	out_be32(&priv->msg_regs->pwsr,
+		 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
+
+	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
+		priv->port_write_msg.err_count++;
+		pr_info("RIO: Port-Write Transaction Err (%d)\n",
+			priv->port_write_msg.err_count);
+	}
+	if (ipwsr & RIO_IPWSR_PWD) {
+		priv->port_write_msg.discard_count++;
+		pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
+			priv->port_write_msg.discard_count);
+	}
+
+	/* Schedule deferred processing if PW was received */
+	if (ipwsr & RIO_IPWSR_QFI) {
+		/* Save PW message (if there is room in FIFO),
+		 * otherwise discard it.
+		 */
+		if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
+			priv->port_write_msg.msg_count++;
+			kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
+				 RIO_PW_MSG_SIZE);
+		} else {
+			priv->port_write_msg.discard_count++;
+			pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
+				priv->port_write_msg.discard_count);
+		}
+		schedule_work(&priv->pw_work);
+	}
+
+	/* Issue Clear Queue command. This allows another
+	 * port-write to be received.
+	 */
+	out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
+
+	return IRQ_HANDLED;
+}
+
+static void fsl_pw_dpc(struct work_struct *work)
+{
+	struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
+	unsigned long flags;
+	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];
+
+	/*
+	 * Process port-write messages
+	 */
+	spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+	while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
+			 RIO_PW_MSG_SIZE)) {
+		/* Process one message */
+		spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+#ifdef DEBUG_PW
+		{
+		u32 i;
+		pr_debug("%s : Port-Write Message:", __func__);
+		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
+			if ((i%4) == 0)
+				pr_debug("\n0x%02x: 0x%08x", i*4,
+					 msg_buffer[i]);
+			else
+				pr_debug(" 0x%08x", msg_buffer[i]);
+		}
+		pr_debug("\n");
+		}
+#endif
+		/* Pass the port-write message to RIO core for processing */
+		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+		spin_lock_irqsave(&priv->pw_fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
+}
+
+/**
+ * fsl_rio_pw_enable - enable/disable port-write interface init
+ * @mport: Master port implementing the port write unit
+ * @enable: 1=enable; 0=disable port-write message handling
+ */
+static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
+{
+	struct rio_priv *priv = mport->priv;
+	u32 rval;
+
+	rval = in_be32(&priv->msg_regs->pwmr);
+
+	if (enable)
+		rval |= RIO_IPWMR_PWE;
+	else
+		rval &= ~RIO_IPWMR_PWE;
+
+	out_be32(&priv->msg_regs->pwmr, rval);
+
+	return 0;
+}
+
+/**
+ * fsl_rio_port_write_init - MPC85xx port write interface init
+ * @mport: Master port implementing the port write unit
+ *
+ * Initializes port write unit hardware and DMA buffer
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */ +static int fsl_rio_port_write_init(struct rio_mport *mport) +{ + struct rio_priv *priv = mport->priv; + int rc = 0; + + /* Following configurations require a disabled port write controller */ + out_be32(&priv->msg_regs->pwmr, + in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE); + + /* Initialize port write */ + priv->port_write_msg.virt = dma_alloc_coherent(priv->dev, + RIO_PW_MSG_SIZE, + &priv->port_write_msg.phys, GFP_KERNEL); + if (!priv->port_write_msg.virt) { + pr_err("RIO: unable allocate port write queue\n"); + return -ENOMEM; + } + + priv->port_write_msg.err_count = 0; + priv->port_write_msg.discard_count = 0; + + /* Point dequeue/enqueue pointers at first entry */ + out_be32(&priv->msg_regs->epwqbar, 0); + out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys); + + pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n", + in_be32(&priv->msg_regs->epwqbar), + in_be32(&priv->msg_regs->pwqbar)); + + /* Clear interrupt status IPWSR */ + out_be32(&priv->msg_regs->pwsr, + (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); + + /* Configure port write contoller for snooping enable all reporting, + clear queue full */ + out_be32(&priv->msg_regs->pwmr, + RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ); + + + /* Hook up port-write handler */ + rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, + "port-write", (void *)mport); + if (rc < 0) { + pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); + goto err_out; + } + + INIT_WORK(&priv->pw_work, fsl_pw_dpc); + spin_lock_init(&priv->pw_fifo_lock); + if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { + pr_err("FIFO allocation failed\n"); + rc = -ENOMEM; + goto err_out_irq; + } + + pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n", + in_be32(&priv->msg_regs->pwmr), + in_be32(&priv->msg_regs->pwsr)); + + return rc; + +err_out_irq: + free_irq(IRQ_RIO_PW(mport), (void *)mport); +err_out: + dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE, + priv->port_write_msg.virt, + priv->port_write_msg.phys); + return rc; +} + static char *cmdline = NULL; static int fsl_rio_get_hdid(int index) @@ -1057,7 +1393,7 @@ int fsl_rio_setup(struct of_device *dev) dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n", law_start, law_size); - ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL); + ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); if (!ops) { rc = -ENOMEM; goto err_ops; @@ -1067,6 +1403,7 @@ int fsl_rio_setup(struct of_device *dev) ops->cread = fsl_rio_config_read; ops->cwrite = fsl_rio_config_write; ops->dsend = fsl_rio_doorbell_send; + ops->pwenable = fsl_rio_pw_enable; port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) { @@ -1089,11 +1426,12 @@ int fsl_rio_setup(struct of_device *dev) port->iores.flags = IORESOURCE_MEM; port->iores.name = "rio_io_win"; + priv->pwirq = irq_of_parse_and_map(dev->node, 0); priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4); - dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq, - priv->txirq, priv->rxirq); + dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq %d\n", + priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq); rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0); @@ -1109,6 +1447,7 @@ int fsl_rio_setup(struct of_device *dev) rio_register_mport(port); priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); + 
rio_regs_win = priv->regs_win; /* Probe the master port phy type */ ccsr = in_be32(priv->regs_win + RIO_CCSR); @@ -1166,7 +1505,8 @@ int fsl_rio_setup(struct of_device *dev) /* Configure maintenance transaction window */ out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12); - out_be32(&priv->maint_atmu_regs->rowar, 0x80077015); /* 4M */ + out_be32(&priv->maint_atmu_regs->rowar, + 0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1)); priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE); @@ -1175,6 +1515,12 @@ int fsl_rio_setup(struct of_device *dev) (law_start + RIO_MAINT_WIN_SIZE) >> 12); out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b); /* 4k */ fsl_rio_doorbell_init(port); + fsl_rio_port_write_init(port); + + saved_mcheck_exception = ppc_md.machine_check_exception; + ppc_md.machine_check_exception = fsl_rio_mcheck_exception; + /* Ensure that RFXE is set */ + mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); return 0; err: diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 79d0ca086820..bee1c0f794cf 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -102,6 +102,7 @@ config S390 select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_KERNEL_LZO select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK @@ -479,13 +480,6 @@ config CMM Everybody who wants to run Linux under VM should select this option. -config CMM_PROC - bool "/proc interface to cooperative memory management" - depends on CMM - help - Select this option to enable the /proc interface to the - cooperative memory management. - config CMM_IUCV bool "IUCV special message interface to cooperative memory management" depends on CMM && (SMSGIUCV=y || CMM=SMSGIUCV) diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 6e4a67ad07e1..1c999f726a58 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -7,7 +7,7 @@ BITS := $(if $(CONFIG_64BIT),64,31) targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ - vmlinux.bin.lzma misc.o piggy.o sizes.h head$(BITS).o + vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += $(cflags-y) @@ -47,6 +47,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZMA) := lzma +suffix-$(CONFIG_KERNEL_LZO) := lzo $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) $(call if_changed,gzip) @@ -54,6 +55,8 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) $(call if_changed,bzip2) $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) $(call if_changed,lzma) +$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) + $(call if_changed,lzo) LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 14e0479d3888..0851eb1e919e 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -50,6 +50,10 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/decompress_unlzma.c" #endif +#ifdef CONFIG_KERNEL_LZO +#include "../../../../lib/decompress_unlzo.c" +#endif + extern _sclp_print_early(const char *); int puts(const char *s) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 451bfbb9db3d..76daea117181 100644 --- a/arch/s390/include/asm/atomic.h +++ 
b/arch/s390/include/asm/atomic.h @@ -15,6 +15,7 @@ #include #include +#include #define ATOMIC_INIT(i) { (i) } @@ -274,6 +275,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) { long long c, old; + c = atomic64_read(v); for (;;) { if (unlikely(c == u)) @@ -286,6 +288,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) return c != u; } +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long c, old, dec; + + c = atomic64_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic64_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + #define atomic64_add(_i, _v) atomic64_add_return(_i, _v) #define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) #define atomic64_inc(_v) atomic64_add_return(1, _v) diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 9b866816863c..24aafa68b643 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h @@ -14,6 +14,6 @@ #define L1_CACHE_BYTES 256 #define L1_CACHE_SHIFT 8 -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #endif diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h index f4bd346a52d3..1c0030f9b890 100644 --- a/arch/s390/include/asm/ccwdev.h +++ b/arch/s390/include/asm/ccwdev.h @@ -91,6 +91,14 @@ struct ccw_device { void (*handler) (struct ccw_device *, unsigned long, struct irb *); }; +/* + * Possible CIO actions triggered by the unit check handler. + */ +enum uc_todo { + UC_TODO_RETRY, + UC_TODO_RETRY_ON_NEW_PATH, + UC_TODO_STOP +}; /** * struct ccw driver - device driver for channel attached devices @@ -107,6 +115,7 @@ struct ccw_device { * @freeze: callback for freezing during hibernation snapshotting * @thaw: undo work done in @freeze * @restore: callback for restoring after hibernation + * @uc_handler: callback for unit check handler * @driver: embedded device driver structure * @name: device driver name */ @@ -124,6 +133,7 @@ struct ccw_driver { int (*freeze)(struct ccw_device *); int (*thaw) (struct ccw_device *); int (*restore)(struct ccw_device *); + enum uc_todo (*uc_handler) (struct ccw_device *, struct irb *); struct device_driver driver; char *name; }; diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 35d786fe93ae..be44d94cba54 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h @@ -1 +1,3 @@ +#define ISA_DMA_THRESHOLD (~0UL) + #include diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index d9b490a2716e..5232278d79ad 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -132,8 +132,6 @@ int main(void) DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func)); - DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook)); - DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); DEFINE(__LC_IRB, offsetof(struct _lowcore, irb)); DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); @@ -154,6 +152,8 @@ int main(void) 
DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); + DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook)); + DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); #endif /* CONFIG_32BIT */ return 0; } diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 178d92536d90..e7192e1cb678 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -65,7 +65,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ ltgr %r3,%r3 jz 0f basr %r14,%r3 - 0: +0: #endif .endm diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 3d34eef5a2c3..2a3d2bf6f083 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -63,6 +63,8 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction) case 0x0b: /* bsm */ case 0x83: /* diag */ case 0x44: /* ex */ + case 0xac: /* stnsm */ + case 0xad: /* stosm */ return -EINVAL; } switch (*(__u16 *) instruction) { @@ -72,6 +74,7 @@ int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction) case 0xb258: /* bsg */ case 0xb218: /* pc */ case 0xb228: /* pt */ + case 0xb98d: /* epsw */ return -EINVAL; } return 0; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7d893248d265..c8e8e1354e1d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -401,7 +401,6 @@ setup_lowcore(void) lc->io_new_psw.mask = psw_kernel_bits; lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; - lc->cmf_hpp = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; lc->async_stack = (unsigned long) __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; @@ -418,6 +417,7 @@ setup_lowcore(void) __ctl_set_bit(14, 29); } #else + lc->cmf_hpp = -1ULL; lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif lc->sync_enter_timer = S390_lowcore.sync_enter_timer; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index e4d98de83dd8..541053ed234e 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -944,21 +944,21 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, struct cpu *c = &per_cpu(cpu_devices, cpu); struct sys_device *s = &c->sysdev; struct s390_idle_data *idle; + int err = 0; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: idle = &per_cpu(s390_idle, cpu); memset(idle, 0, sizeof(struct s390_idle_data)); - if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) - return NOTIFY_BAD; + err = sysfs_create_group(&s->kobj, &cpu_online_attr_group); break; case CPU_DEAD: case CPU_DEAD_FROZEN: sysfs_remove_group(&s->kobj, &cpu_online_attr_group); break; } - return NOTIFY_OK; + return notifier_from_errno(err); } static struct notifier_block __cpuinitdata smp_cpu_nb = { diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index c56d3f56d020..1f066e46e83e 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -264,7 +264,7 @@ restore_registers: lghi %r2,0 br %r14 - .section .data.nosave,"aw",@progbits + .section .data..nosave,"aw",@progbits .align 8 .Ldisabled_wait_31: .long 0x000a0000,0x00000000 diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 2f4b687cc7fa..a7251580891c 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -33,17 +33,6 @@ config KVM If unsure, say N. 
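The smp_cpu_notify() change above is one instance of a conversion repeated later in this merge (therm_throt.c, cpuid.c and msr.c get the same treatment): rather than collapsing every failure to NOTIFY_BAD, the callback returns notifier_from_errno(err), which yields an OK result for err == 0 and otherwise a stop value that still carries the errno. A sketch of the pattern (demo_setup_cpu() is a hypothetical helper):

static int demo_cpu_notify(struct notifier_block *nb, unsigned long action,
			   void *hcpu)
{
	int err = 0;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		err = demo_setup_cpu((long)hcpu);	/* e.g. -ENOMEM */

	/*
	 * 0 becomes an OK return; a negative errno becomes a stop value
	 * that the caller can convert back with notifier_to_errno().
	 */
	return notifier_from_errno(err);
}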
-config KVM_AWARE_CMF - depends on KVM - bool "KVM aware sampling" - ---help--- - This option enhances the sampling data from the CPU Measurement - Facility with additional information, that allows to distinguish - guest(s) and host when using the kernel based virtual machine - functionality. - - If unsure, say N. - # OK, it's a little counter-intuitive to do this, but it puts it neatly under # the virtualization menu. source drivers/vhost/Kconfig diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S index 31646bd0e469..7e9d30d567b0 100644 --- a/arch/s390/kvm/sie64a.S +++ b/arch/s390/kvm/sie64a.S @@ -32,12 +32,10 @@ SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW .macro SPP newpp -#ifdef CONFIG_KVM_AWARE_CMF tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP jz 0f .insn s,0xb2800000,\newpp - 0: -#endif +0: .endm sie_irq_handler: diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index f87b34731e1d..eb6a2ef5f82e 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -1,11 +1,9 @@ /* - * arch/s390/mm/cmm.c + * Collaborative memory management interface. * - * S390 version - * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * Copyright IBM Corp 2003,2010 + * Author(s): Martin Schwidefsky , * - * Collaborative memory management interface. */ #include @@ -20,9 +18,9 @@ #include #include #include +#include #include -#include #include static char *sender = "VMRMSVM"; @@ -53,14 +51,14 @@ static struct cmm_page_array *cmm_timed_page_list; static DEFINE_SPINLOCK(cmm_lock); static struct task_struct *cmm_thread_ptr; -static wait_queue_head_t cmm_thread_wait; -static struct timer_list cmm_timer; +static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait); +static DEFINE_TIMER(cmm_timer, NULL, 0, 0); static void cmm_timer_fn(unsigned long); static void cmm_set_timer(void); -static long -cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list) +static long cmm_alloc_pages(long nr, long *counter, + struct cmm_page_array **list) { struct cmm_page_array *pa, *npa; unsigned long addr; @@ -99,8 +97,7 @@ cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list) return nr; } -static long -cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) +static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) { struct cmm_page_array *pa; unsigned long addr; @@ -140,11 +137,10 @@ static int cmm_oom_notify(struct notifier_block *self, } static struct notifier_block cmm_oom_nb = { - .notifier_call = cmm_oom_notify + .notifier_call = cmm_oom_notify, }; -static int -cmm_thread(void *dummy) +static int cmm_thread(void *dummy) { int rc; @@ -170,7 +166,7 @@ cmm_thread(void *dummy) cmm_timed_pages_target = cmm_timed_pages; } else if (cmm_timed_pages_target < cmm_timed_pages) { cmm_free_pages(1, &cmm_timed_pages, - &cmm_timed_page_list); + &cmm_timed_page_list); } if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer)) cmm_set_timer(); @@ -178,14 +174,12 @@ cmm_thread(void *dummy) return 0; } -static void -cmm_kick_thread(void) +static void cmm_kick_thread(void) { wake_up(&cmm_thread_wait); } -static void -cmm_set_timer(void) +static void cmm_set_timer(void) { if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { if (timer_pending(&cmm_timer)) @@ -202,8 +196,7 @@ cmm_set_timer(void) add_timer(&cmm_timer); } -static void -cmm_timer_fn(unsigned long ignored) +static void cmm_timer_fn(unsigned long ignored) { long nr; @@ -216,57 +209,49 @@ cmm_timer_fn(unsigned long ignored) 
cmm_set_timer(); } -void -cmm_set_pages(long nr) +static void cmm_set_pages(long nr) { cmm_pages_target = nr; cmm_kick_thread(); } -long -cmm_get_pages(void) +static long cmm_get_pages(void) { return cmm_pages; } -void -cmm_add_timed_pages(long nr) +static void cmm_add_timed_pages(long nr) { cmm_timed_pages_target += nr; cmm_kick_thread(); } -long -cmm_get_timed_pages(void) +static long cmm_get_timed_pages(void) { return cmm_timed_pages; } -void -cmm_set_timeout(long nr, long seconds) +static void cmm_set_timeout(long nr, long seconds) { cmm_timeout_pages = nr; cmm_timeout_seconds = seconds; cmm_set_timer(); } -static int -cmm_skip_blanks(char *cp, char **endp) +static int cmm_skip_blanks(char *cp, char **endp) { char *str; - for (str = cp; *str == ' ' || *str == '\t'; str++); + for (str = cp; *str == ' ' || *str == '\t'; str++) + ; *endp = str; return str != cp; } -#ifdef CONFIG_CMM_PROC - static struct ctl_table cmm_table[]; -static int -cmm_pages_handler(ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) { char buf[16], *p; long nr; @@ -305,9 +290,8 @@ cmm_pages_handler(ctl_table *ctl, int write, return 0; } -static int -cmm_timeout_handler(ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) +static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) { char buf[64], *p; long nr, seconds; @@ -370,12 +354,10 @@ static struct ctl_table cmm_dir_table[] = { }, { } }; -#endif #ifdef CONFIG_CMM_IUCV #define SMSG_PREFIX "CMM" -static void -cmm_smsg_target(const char *from, char *msg) +static void cmm_smsg_target(const char *from, char *msg) { long nr, seconds; @@ -445,16 +427,13 @@ static struct notifier_block cmm_power_notifier = { .notifier_call = cmm_power_event, }; -static int -cmm_init (void) +static int cmm_init(void) { int rc = -ENOMEM; -#ifdef CONFIG_CMM_PROC cmm_sysctl_header = register_sysctl_table(cmm_dir_table); if (!cmm_sysctl_header) goto out_sysctl; -#endif #ifdef CONFIG_CMM_IUCV rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); if (rc < 0) @@ -466,8 +445,6 @@ cmm_init (void) rc = register_pm_notifier(&cmm_power_notifier); if (rc) goto out_pm; - init_waitqueue_head(&cmm_thread_wait); - init_timer(&cmm_timer); cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); rc = IS_ERR(cmm_thread_ptr) ? 
PTR_ERR(cmm_thread_ptr) : 0; if (rc) @@ -483,36 +460,26 @@ out_oom_notify: smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); out_smsg: #endif -#ifdef CONFIG_CMM_PROC unregister_sysctl_table(cmm_sysctl_header); out_sysctl: -#endif + del_timer_sync(&cmm_timer); return rc; } +module_init(cmm_init); -static void -cmm_exit(void) +static void cmm_exit(void) { - kthread_stop(cmm_thread_ptr); - unregister_pm_notifier(&cmm_power_notifier); - unregister_oom_notifier(&cmm_oom_nb); - cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); - cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); -#ifdef CONFIG_CMM_PROC unregister_sysctl_table(cmm_sysctl_header); -#endif #ifdef CONFIG_CMM_IUCV smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); #endif + unregister_pm_notifier(&cmm_power_notifier); + unregister_oom_notifier(&cmm_oom_nb); + kthread_stop(cmm_thread_ptr); + del_timer_sync(&cmm_timer); + cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); + cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); } - -module_init(cmm_init); module_exit(cmm_exit); -EXPORT_SYMBOL(cmm_set_pages); -EXPORT_SYMBOL(cmm_get_pages); -EXPORT_SYMBOL(cmm_add_timed_pages); -EXPORT_SYMBOL(cmm_get_timed_pages); -EXPORT_SYMBOL(cmm_set_timeout); - MODULE_LICENSE("GPL"); diff --git a/arch/score/include/asm/scatterlist.h b/arch/score/include/asm/scatterlist.h index 9f533b8362c7..4fa1a6658215 100644 --- a/arch/score/include/asm/scatterlist.h +++ b/arch/score/include/asm/scatterlist.h @@ -1,6 +1,8 @@ #ifndef _ASM_SCORE_SCATTERLIST_H #define _ASM_SCORE_SCATTERLIST_H +#define ISA_DMA_THRESHOLD (~0UL) + #include #endif /* _ASM_SCORE_SCATTERLIST_H */ diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 0e318c905eea..c5ee4ce60b57 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -186,6 +186,9 @@ config DMA_NONCOHERENT config NEED_DMA_MAP_STATE def_bool DMA_NONCOHERENT +config NEED_SG_DMA_LENGTH + def_bool y + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr index f02382ae5c48..862d74808236 100644 --- a/arch/sh/boot/compressed/vmlinux.scr +++ b/arch/sh/boot/compressed/vmlinux.scr @@ -1,6 +1,6 @@ SECTIONS { - .rodata.compressed : { + .rodata..compressed : { input_len = .; LONG(input_data_end - input_data) input_data = .; *(.data) diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h index e461d67f03c3..ef9e555aafba 100644 --- a/arch/sh/include/asm/cache.h +++ b/arch/sh/include/asm/cache.h @@ -14,7 +14,7 @@ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifndef __ASSEMBLY__ struct cache_info { diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index d4104ce9fe53..6c4bbba2a675 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -435,29 +435,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) REGSET_DSP, 0, sizeof(struct pt_dspregs), (const void __user *)data); -#endif -#ifdef CONFIG_BINFMT_ELF_FDPIC - case PTRACE_GETFDPIC: { - unsigned long tmp = 0; - - switch (addr) { - case PTRACE_GETFDPIC_EXEC: - tmp = child->mm->context.exec_fdpic_loadmap; - break; - case PTRACE_GETFDPIC_INTERP: - tmp = child->mm->context.interp_fdpic_loadmap; - break; - default: - break; - } - - ret = 0; - if (put_user(tmp, datap)) { - ret = -EFAULT; - break; - } - break; - } #endif default: 
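The cmm.c rework that ends above also switches the wait queue and the timer from runtime to static initialisation, which is why init_waitqueue_head() and init_timer() drop out of cmm_init(). The same idiom in isolation (names are illustrative):

static void demo_timer_fn(unsigned long data);

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);		/* ready at link time */
static DEFINE_TIMER(demo_timer, demo_timer_fn, 0, 0);	/* fn, expires, data */

/*
 * ...so the module init routine no longer needs
 *	init_waitqueue_head(&demo_wait);
 *	init_timer(&demo_timer);
 * and only del_timer_sync() remains necessary on the error/exit paths.
 */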
ret = ptrace_request(child, request, addr, data); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index d6781ce687e2..6f1470baa314 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -133,6 +133,9 @@ config ZONE_DMA config NEED_DMA_MAP_STATE def_bool y +config NEED_SG_DMA_LENGTH + def_bool y + config GENERIC_ISA_DMA bool default y if SPARC32 diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c index bbf91b9c3d39..e7f2940bd270 100644 --- a/arch/sparc/boot/btfixupprep.c +++ b/arch/sparc/boot/btfixupprep.c @@ -325,7 +325,7 @@ main1: (*rr)->next = NULL; } printf("! Generated by btfixupprep. Do not edit.\n\n"); - printf("\t.section\t\".data.init\",#alloc,#write\n\t.align\t4\n\n"); + printf("\t.section\t\".data..init\",#alloc,#write\n\t.align\t4\n\n"); printf("\t.global\t___btfixup_start\n___btfixup_start:\n\n"); for (i = 0; i < last; i++) { f = array + i; diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h index 78b07009f60a..0588b8c7faa2 100644 --- a/arch/sparc/include/asm/cache.h +++ b/arch/sparc/include/asm/cache.h @@ -21,7 +21,7 @@ #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #ifdef CONFIG_SPARC32 #include diff --git a/arch/sparc/include/asm/scatterlist.h b/arch/sparc/include/asm/scatterlist.h index d1120257b033..433e45f05fd4 100644 --- a/arch/sparc/include/asm/scatterlist.h +++ b/arch/sparc/include/asm/scatterlist.h @@ -1,8 +1,9 @@ #ifndef _SPARC_SCATTERLIST_H #define _SPARC_SCATTERLIST_H -#define sg_dma_len(sg) ((sg)->dma_length) - #include +#define ISA_DMA_THRESHOLD (~0UL) +#define ARCH_HAS_SG_CHAIN + #endif /* !(_SPARC_SCATTERLIST_H) */ diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 34ce49f80eac..0ec92c8861dd 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -92,6 +92,8 @@ struct cpu_hw_events { /* Enabled/disable state. 
*/ int enabled; + + unsigned int group_flag; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; @@ -981,53 +983,6 @@ static int collect_events(struct perf_event *group, int max_count, return n; } -static void event_sched_in(struct perf_event *event) -{ - event->state = PERF_EVENT_STATE_ACTIVE; - event->oncpu = smp_processor_id(); - event->tstamp_running += event->ctx->time - event->tstamp_stopped; - if (is_software_event(event)) - event->pmu->enable(event); -} - -int hw_perf_group_sched_in(struct perf_event *group_leader, - struct perf_cpu_context *cpuctx, - struct perf_event_context *ctx) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct perf_event *sub; - int n0, n; - - if (!sparc_pmu) - return 0; - - n0 = cpuc->n_events; - n = collect_events(group_leader, perf_max_events - n0, - &cpuc->event[n0], &cpuc->events[n0], - &cpuc->current_idx[n0]); - if (n < 0) - return -EAGAIN; - if (check_excludes(cpuc->event, n0, n)) - return -EINVAL; - if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0)) - return -EAGAIN; - cpuc->n_events = n0 + n; - cpuc->n_added += n; - - cpuctx->active_oncpu += n; - n = 1; - event_sched_in(group_leader); - list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { - if (sub->state != PERF_EVENT_STATE_OFF) { - event_sched_in(sub); - n++; - } - } - ctx->nr_active += n; - - return 1; -} - static int sparc_pmu_enable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); @@ -1045,11 +1000,20 @@ static int sparc_pmu_enable(struct perf_event *event) cpuc->events[n0] = event->hw.event_base; cpuc->current_idx[n0] = PIC_NO_INDEX; + /* + * If group events scheduling transaction was started, + * skip the schedulability test here, it will be peformed + * at commit time(->commit_txn) as a whole + */ + if (cpuc->group_flag & PERF_EVENT_TXN_STARTED) + goto nocheck; + if (check_excludes(cpuc->event, n0, 1)) goto out; if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) goto out; +nocheck: cpuc->n_events++; cpuc->n_added++; @@ -1129,11 +1093,61 @@ static int __hw_perf_event_init(struct perf_event *event) return 0; } +/* + * Start group events scheduling transaction + * Set the flag to make pmu::enable() not perform the + * schedulability test, it will be performed at commit time + */ +static void sparc_pmu_start_txn(const struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + cpuhw->group_flag |= PERF_EVENT_TXN_STARTED; +} + +/* + * Stop group events scheduling transaction + * Clear the flag and pmu::enable() will perform the + * schedulability test. 
+ */ +static void sparc_pmu_cancel_txn(const struct pmu *pmu) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED; +} + +/* + * Commit group events scheduling transaction + * Perform the group schedulability test as a whole + * Return 0 if success + */ +static int sparc_pmu_commit_txn(const struct pmu *pmu) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int n; + + if (!sparc_pmu) + return -EINVAL; + + cpuc = &__get_cpu_var(cpu_hw_events); + n = cpuc->n_events; + if (check_excludes(cpuc->event, 0, n)) + return -EINVAL; + if (sparc_check_constraints(cpuc->event, cpuc->events, n)) + return -EAGAIN; + + return 0; +} + static const struct pmu pmu = { .enable = sparc_pmu_enable, .disable = sparc_pmu_disable, .read = sparc_pmu_read, .unthrottle = sparc_pmu_unthrottle, + .start_txn = sparc_pmu_start_txn, + .cancel_txn = sparc_pmu_cancel_txn, + .commit_txn = sparc_pmu_commit_txn, }; const struct pmu *hw_perf_event_init(struct perf_event *event) diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 7fcad58e216d..69268014dd8e 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -94,7 +94,7 @@ SECTIONS .data : { INIT_TASK_DATA(KERNEL_STACK_SIZE) . = ALIGN(KERNEL_STACK_SIZE); - *(.data.init_irqstack) + *(.data..init_irqstack) DATA_DATA *(.data.* .gnu.linkonce.d.*) SORT(CONSTRUCTORS) diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c index 8aa77b61a5ff..ddc9698b66ed 100644 --- a/arch/um/kernel/init_task.c +++ b/arch/um/kernel/init_task.c @@ -34,5 +34,5 @@ union thread_union init_thread_union __init_task_data = { INIT_THREAD_INFO(init_task) }; union thread_union cpu0_irqstack - __attribute__((__section__(".data.init_irqstack"))) = + __attribute__((__section__(".data..init_irqstack"))) = { INIT_THREAD_INFO(init_task) }; diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index e7a6cca667aa..ec6378550671 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -50,7 +50,7 @@ SECTIONS { INIT_TASK_DATA(KERNEL_STACK_SIZE) . 
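For context, the three hooks added above slot into the group-scheduling path of the core perf code, which is what made the open-coded hw_perf_group_sched_in() removed earlier in this hunk redundant. Roughly, and only as a simplified sketch rather than the actual core implementation:

static int demo_group_sched_in(const struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *sub;

	pmu->start_txn(pmu);		/* defer the schedulability test */

	if (pmu->enable(leader))
		goto fail;
	list_for_each_entry(sub, &leader->sibling_list, group_entry)
		if (pmu->enable(sub))
			goto fail;

	if (!pmu->commit_txn(pmu))	/* test the whole group at once */
		return 0;
fail:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}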
= ALIGN(KERNEL_STACK_SIZE); - *(.data.init_irqstack) + *(.data..init_irqstack) DATA_DATA *(.gnu.linkonce.d*) CONSTRUCTORS diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore new file mode 100644 index 000000000000..028079065af6 --- /dev/null +++ b/arch/x86/.gitignore @@ -0,0 +1,3 @@ +boot/compressed/vmlinux +tools/test_get_len + diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e0c619c55b4e..dcb0593b4a66 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -109,6 +109,9 @@ config SBUS config NEED_DMA_MAP_STATE def_bool (X86_64 || DMAR || DMA_API_DEBUG) +config NEED_SG_DMA_LENGTH + def_bool y + config GENERIC_ISA_DMA def_bool y @@ -1703,6 +1706,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID def_bool X86_64 depends on NUMA +config USE_PERCPU_NUMA_NODE_ID + def_bool X86_64 + depends on NUMA + menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index bcbd36c41432..5c228129d175 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c @@ -77,7 +77,7 @@ int main(int argc, char *argv[]) offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ - printf(".section \".rodata.compressed\",\"a\",@progbits\n"); + printf(".section \".rodata..compressed\",\"a\",@progbits\n"); printf(".globl z_input_len\n"); printf("z_input_len = %lu\n", ilen); printf(".globl z_output_len\n"); diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S index a6f1a59a5b0c..5ddabceee124 100644 --- a/arch/x86/boot/compressed/vmlinux.lds.S +++ b/arch/x86/boot/compressed/vmlinux.lds.S @@ -26,8 +26,8 @@ SECTIONS HEAD_TEXT _ehead = . ; } - .rodata.compressed : { - *(.rodata.compressed) + .rodata..compressed : { + *(.rodata..compressed) } .text : { _text = .; /* Text */ diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 56f462cf22d2..aa2c39d968fc 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -85,7 +85,6 @@ extern int acpi_ioapic; extern int acpi_noirq; extern int acpi_strict; extern int acpi_disabled; -extern int acpi_ht; extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; @@ -97,7 +96,6 @@ void acpi_pic_sci_set_trigger(unsigned int, u16); static inline void disable_acpi(void) { acpi_disabled = 1; - acpi_ht = 0; acpi_pci_disabled = 1; acpi_noirq = 1; } diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h index 2f9047cfaaca..48f99f15452e 100644 --- a/arch/x86/include/asm/cache.h +++ b/arch/x86/include/asm/cache.h @@ -7,7 +7,7 @@ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index dca9c545f44e..468145914389 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -332,6 +332,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit) #endif } +#if __GNUC__ >= 4 #define static_cpu_has(bit) \ ( \ __builtin_constant_p(boot_cpu_has(bit)) ? 
\ @@ -340,6 +341,12 @@ static __always_inline __pure bool __static_cpu_has(u8 bit) __static_cpu_has(bit) : \ boot_cpu_has(bit) \ ) +#else +/* + * gcc 3.x is too stupid to do the static test; fall back to dynamic. + */ +#define static_cpu_has(bit) boot_cpu_has(bit) +#endif #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 6c3fdd631ed3..f32a4301c4d4 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -225,5 +225,13 @@ extern void mcheck_intel_therm_init(void); static inline void mcheck_intel_therm_init(void) { } #endif +/* + * Used by APEI to report memory error via /dev/mcelog + */ + +struct cper_sec_mem_err; +extern void apei_mce_report_mem_error(int corrected, + struct cper_sec_mem_err *mem_err); + #endif /* __KERNEL__ */ #endif /* _ASM_X86_MCE_H */ diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index b05400a542ff..64a8ebff06fc 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -89,7 +89,8 @@ P4_CCCR_ENABLE) /* HT mask */ -#define P4_CCCR_MASK_HT (P4_CCCR_MASK | P4_CCCR_THREAD_ANY) +#define P4_CCCR_MASK_HT \ + (P4_CCCR_MASK | P4_CCCR_OVF_PMI_T1 | P4_CCCR_THREAD_ANY) #define P4_GEN_ESCR_EMASK(class, name, bit) \ class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT) diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h deleted file mode 100644 index c8e9c8bed3d0..000000000000 --- a/arch/x86/include/asm/rdc321x_defs.h +++ /dev/null @@ -1,12 +0,0 @@ -#define PFX "rdc321x: " - -/* General purpose configuration and data registers */ -#define RDC3210_CFGREG_ADDR 0x0CF8 -#define RDC3210_CFGREG_DATA 0x0CFC - -#define RDC321X_GPIO_CTRL_REG1 0x48 -#define RDC321X_GPIO_CTRL_REG2 0x84 -#define RDC321X_GPIO_DATA_REG1 0x4c -#define RDC321X_GPIO_DATA_REG2 0x88 - -#define RDC321X_MAX_GPIO 58 diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h index 75af592677ec..fb0b1874396f 100644 --- a/arch/x86/include/asm/scatterlist.h +++ b/arch/x86/include/asm/scatterlist.h @@ -1,8 +1,9 @@ #ifndef _ASM_X86_SCATTERLIST_H #define _ASM_X86_SCATTERLIST_H -#define ISA_DMA_THRESHOLD (0x00ffffff) - #include +#define ISA_DMA_THRESHOLD (0x00ffffff) +#define ARCH_HAS_SG_CHAIN + #endif /* _ASM_X86_SCATTERLIST_H */ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 62ba9400cc43..f0b6e5dbc5a0 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -239,8 +239,8 @@ static inline struct thread_info *current_thread_info(void) #define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ #define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/ -#define TS_POLLING 0x0004 /* true if in idle loop - and not sleeping */ +#define TS_POLLING 0x0004 /* idle task polling need_resched, + skip sending interrupt */ #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index c5087d796587..21899cc31e52 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -53,33 +53,29 @@ extern int cpu_to_node_map[]; /* Returns the number of the node containing CPU 'cpu' */ -static inline int cpu_to_node(int cpu) +static inline int __cpu_to_node(int cpu) { return cpu_to_node_map[cpu]; } -#define 
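The __GNUC__ guard above only changes which flavour of the feature test a caller gets; call sites look the same either way. An illustrative use, not taken from this patch and with made-up helper names:

	if (static_cpu_has(X86_FEATURE_XMM2))
		demo_sse2_copy(dst, src, len);		/* gcc >= 4: decided by boot-time patching */
	else
		demo_generic_copy(dst, src, len);	/* gcc 3.x: plain boot_cpu_has() at runtime */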
early_cpu_to_node(cpu) cpu_to_node(cpu) +#define early_cpu_to_node __cpu_to_node +#define cpu_to_node __cpu_to_node #else /* CONFIG_X86_64 */ /* Mappings between logical cpu number and node number */ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map); -/* Returns the number of the current Node. */ -DECLARE_PER_CPU(int, node_number); -#define numa_node_id() percpu_read(node_number) - #ifdef CONFIG_DEBUG_PER_CPU_MAPS -extern int cpu_to_node(int cpu); +/* + * override generic percpu implementation of cpu_to_node + */ +extern int __cpu_to_node(int cpu); +#define cpu_to_node __cpu_to_node + extern int early_cpu_to_node(int cpu); #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ -/* Returns the number of the node containing CPU 'cpu' */ -static inline int cpu_to_node(int cpu) -{ - return per_cpu(x86_cpu_to_node_map, cpu); -} - /* Same function but used if called before per_cpu areas are setup */ static inline int early_cpu_to_node(int cpu) { @@ -170,6 +166,10 @@ static inline int numa_node_id(void) { return 0; } +/* + * indicate override: + */ +#define numa_node_id numa_node_id static inline int early_cpu_to_node(int cpu) { diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 488be461a380..60cc4058ed5f 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -63,7 +63,6 @@ EXPORT_SYMBOL(acpi_disabled); int acpi_noirq; /* skip ACPI IRQ initialization */ int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); -int acpi_ht __initdata = 1; /* enable HT */ int acpi_lapic; int acpi_ioapic; @@ -1501,9 +1500,8 @@ void __init acpi_boot_table_init(void) /* * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs */ - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return; /* @@ -1534,9 +1532,8 @@ int __init early_acpi_boot_init(void) { /* * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs */ - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return 1; /* @@ -1554,9 +1551,8 @@ int __init acpi_boot_init(void) /* * If acpi_disabled, bail out - * One exception: acpi=ht continues far enough to enumerate LAPICs */ - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return 1; acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf); @@ -1591,21 +1587,12 @@ static int __init parse_acpi(char *arg) /* acpi=force to over-ride black-list */ else if (strcmp(arg, "force") == 0) { acpi_force = 1; - acpi_ht = 1; acpi_disabled = 0; } /* acpi=strict disables out-of-spec workarounds */ else if (strcmp(arg, "strict") == 0) { acpi_strict = 1; } - /* Limit ACPI just to boot-time to enable HT */ - else if (strcmp(arg, "ht") == 0) { - if (!acpi_force) { - printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n"); - disable_acpi(); - } - acpi_ht = 1; - } /* acpi=rsdt use RSDT instead of XSDT */ else if (strcmp(arg, "rsdt") == 0) { acpi_rsdt_forced = 1; diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index f9961034e557..82e508677b91 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -162,8 +162,6 @@ static int __init acpi_sleep_setup(char *str) #endif if (strncmp(str, "old_ordering", 12) == 0) acpi_old_suspend_ordering(); - if (strncmp(str, "sci_force_enable", 16) == 0) - acpi_set_sci_en_on_resume(); str = strchr(str, ','); if (str != NULL) str += strspn(str, ", \t"); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 8ded418b0593..13ab720573e3 100644 --- 
a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -1,4 +1,4 @@ - .section .text.page_aligned + .section .text..page_aligned #include #include #include diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e5a4a1e01618..c02cc692985c 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -51,6 +51,7 @@ #include #include #include +#include unsigned int num_processors; @@ -1151,8 +1152,13 @@ static void __cpuinit lapic_setup_esr(void) */ void __cpuinit setup_local_APIC(void) { - unsigned int value; - int i, j; + unsigned int value, queued; + int i, j, acked = 0; + unsigned long long tsc = 0, ntsc; + long long max_loops = cpu_khz; + + if (cpu_has_tsc) + rdtscll(tsc); if (disable_apic) { arch_disable_smp_support(); @@ -1204,13 +1210,32 @@ void __cpuinit setup_local_APIC(void) * the interrupt. Hence a vector might get locked. It was noticed * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. */ - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1<= 0; i--) + queued |= apic_read(APIC_IRR + i*0x10); + + for (i = APIC_ISR_NR - 1; i >= 0; i--) { + value = apic_read(APIC_ISR + i*0x10); + for (j = 31; j >= 0; j--) { + if (value & (1< 256) { + printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", + acked); + break; + } + if (cpu_has_tsc) { + rdtscll(ntsc); + max_loops = (cpu_khz << 10) - (ntsc - tsc); + } else + max_loops--; + } while (queued && max_loops > 0); + WARN_ON(max_loops <= 0); /* * Now that we are all set up, enable the APIC diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index cc83a002786e..68e4a6f2211e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1121,9 +1121,9 @@ void __cpuinit cpu_init(void) oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA - if (cpu != 0 && percpu_read(node_number) == 0 && - cpu_to_node(cpu) != NUMA_NO_NODE) - percpu_write(node_number, cpu_to_node(cpu)); + if (cpu != 0 && percpu_read(numa_node) == 0 && + early_cpu_to_node(cpu) != NUMA_NO_NODE) + set_numa_node(early_cpu_to_node(cpu)); #endif me = current; diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 6f3dc8fbbfdc..7ec2123838e6 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c @@ -1497,8 +1497,8 @@ static struct cpufreq_driver cpufreq_amd64_driver = { * simply keep the boost-disable flag in sync with the current global * state. 
*/ -static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action, - void *hcpu) +static int cpb_notify(struct notifier_block *nb, unsigned long action, + void *hcpu) { unsigned cpu = (long)hcpu; u32 lo, hi; @@ -1528,7 +1528,7 @@ static int __cpuinit cpb_notify(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata cpb_nb = { +static struct notifier_block cpb_nb = { .notifier_call = cpb_notify, }; diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile index 4ac6d48fe11b..bb34b03af252 100644 --- a/arch/x86/kernel/cpu/mcheck/Makefile +++ b/arch/x86/kernel/cpu/mcheck/Makefile @@ -7,3 +7,5 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o + +obj-$(CONFIG_ACPI_APEI) += mce-apei.o diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c new file mode 100644 index 000000000000..745b54f9be89 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c @@ -0,0 +1,138 @@ +/* + * Bridge between MCE and APEI + * + * On some machine, corrected memory errors are reported via APEI + * generic hardware error source (GHES) instead of corrected Machine + * Check. These corrected memory errors can be reported to user space + * through /dev/mcelog via faking a corrected Machine Check, so that + * the error memory page can be offlined by /sbin/mcelog if the error + * count for one page is beyond the threshold. + * + * For fatal MCE, save MCE record into persistent storage via ERST, so + * that the MCE record can be logged after reboot via ERST. + * + * Copyright 2010 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +#include "mce-internal.h" + +void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err) +{ + struct mce m; + + /* Only corrected MC is reported */ + if (!corrected) + return; + + mce_setup(&m); + m.bank = 1; + /* Fake a memory read corrected error with unknown channel */ + m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f; + m.addr = mem_err->physical_addr; + mce_log(&m); + mce_notify_irq(); +} +EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); + +#define CPER_CREATOR_MCE \ + UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ + 0x64, 0x90, 0xb8, 0x9d) +#define CPER_SECTION_TYPE_MCE \ + UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ + 0x04, 0x4a, 0x38, 0xfc) + +/* + * CPER specification (in UEFI specification 2.3 appendix N) requires + * byte-packed. 
+ */ +struct cper_mce_record { + struct cper_record_header hdr; + struct cper_section_descriptor sec_hdr; + struct mce mce; +} __packed; + +int apei_write_mce(struct mce *m) +{ + struct cper_mce_record rcd; + + memset(&rcd, 0, sizeof(rcd)); + memcpy(rcd.hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE); + rcd.hdr.revision = CPER_RECORD_REV; + rcd.hdr.signature_end = CPER_SIG_END; + rcd.hdr.section_count = 1; + rcd.hdr.error_severity = CPER_SER_FATAL; + /* timestamp, platform_id, partition_id are all invalid */ + rcd.hdr.validation_bits = 0; + rcd.hdr.record_length = sizeof(rcd); + rcd.hdr.creator_id = CPER_CREATOR_MCE; + rcd.hdr.notification_type = CPER_NOTIFY_MCE; + rcd.hdr.record_id = cper_next_record_id(); + rcd.hdr.flags = CPER_HW_ERROR_FLAGS_PREVERR; + + rcd.sec_hdr.section_offset = (void *)&rcd.mce - (void *)&rcd; + rcd.sec_hdr.section_length = sizeof(rcd.mce); + rcd.sec_hdr.revision = CPER_SEC_REV; + /* fru_id and fru_text is invalid */ + rcd.sec_hdr.validation_bits = 0; + rcd.sec_hdr.flags = CPER_SEC_PRIMARY; + rcd.sec_hdr.section_type = CPER_SECTION_TYPE_MCE; + rcd.sec_hdr.section_severity = CPER_SER_FATAL; + + memcpy(&rcd.mce, m, sizeof(*m)); + + return erst_write(&rcd.hdr); +} + +ssize_t apei_read_mce(struct mce *m, u64 *record_id) +{ + struct cper_mce_record rcd; + ssize_t len; + + len = erst_read_next(&rcd.hdr, sizeof(rcd)); + if (len <= 0) + return len; + /* Can not skip other records in storage via ERST unless clear them */ + else if (len != sizeof(rcd) || + uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) { + if (printk_ratelimit()) + pr_warning( + "MCE-APEI: Can not skip the unknown record in ERST"); + return -EIO; + } + + memcpy(m, &rcd.mce, sizeof(*m)); + *record_id = rcd.hdr.record_id; + + return sizeof(*m); +} + +/* Check whether there is record in ERST */ +int apei_check_mce(void) +{ + return erst_get_record_count(); +} + +int apei_clear_mce(u64 record_id) +{ + return erst_clear(record_id); +} diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 32996f9fab67..fefcc69ee8b5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -28,3 +28,26 @@ extern int mce_ser; extern struct mce_bank *mce_banks; +#ifdef CONFIG_ACPI_APEI +int apei_write_mce(struct mce *m); +ssize_t apei_read_mce(struct mce *m, u64 *record_id); +int apei_check_mce(void); +int apei_clear_mce(u64 record_id); +#else +static inline int apei_write_mce(struct mce *m) +{ + return -EINVAL; +} +static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id) +{ + return 0; +} +static inline int apei_check_mce(void) +{ + return 0; +} +static inline int apei_clear_mce(u64 record_id) +{ + return -EINVAL; +} +#endif diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 7a355ddcc64b..707165dbc203 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -264,7 +264,7 @@ static void wait_for_panic(void) static void mce_panic(char *msg, struct mce *final, char *exp) { - int i; + int i, apei_err = 0; if (!fake_panic) { /* @@ -287,8 +287,11 @@ static void mce_panic(char *msg, struct mce *final, char *exp) struct mce *m = &mcelog.entry[i]; if (!(m->status & MCI_STATUS_VAL)) continue; - if (!(m->status & MCI_STATUS_UC)) + if (!(m->status & MCI_STATUS_UC)) { print_mce(m); + if (!apei_err) + apei_err = apei_write_mce(m); + } } /* Now print uncorrected but with the final one last */ for (i = 0; i < MCE_LOG_LEN; i++) { @@ -297,11 +300,17 @@ static void 
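The intended consumer of apei_read_mce()/apei_clear_mce() above is the /dev/mcelog read path changed just below, but the drain pattern is easier to see in isolation (a hypothetical, simplified consumer):

static void demo_drain_erst(void)
{
	struct mce m;
	u64 record_id;

	/* Pull back whatever a previous panic stashed via apei_write_mce(). */
	while (apei_read_mce(&m, &record_id) > 0) {
		mce_log(&m);			/* feed it to /dev/mcelog */
		apei_clear_mce(record_id);	/* avoid re-reporting next boot */
	}
}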
mce_panic(char *msg, struct mce *final, char *exp) continue; if (!(m->status & MCI_STATUS_UC)) continue; - if (!final || memcmp(m, final, sizeof(struct mce))) + if (!final || memcmp(m, final, sizeof(struct mce))) { print_mce(m); + if (!apei_err) + apei_err = apei_write_mce(m); + } } - if (final) + if (final) { print_mce(final); + if (!apei_err) + apei_err = apei_write_mce(final); + } if (cpu_missing) printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n"); print_mce_tail(); @@ -1493,6 +1502,43 @@ static void collect_tscs(void *data) rdtscll(cpu_tsc[smp_processor_id()]); } +static int mce_apei_read_done; + +/* Collect MCE record of previous boot in persistent storage via APEI ERST. */ +static int __mce_read_apei(char __user **ubuf, size_t usize) +{ + int rc; + u64 record_id; + struct mce m; + + if (usize < sizeof(struct mce)) + return -EINVAL; + + rc = apei_read_mce(&m, &record_id); + /* Error or no more MCE record */ + if (rc <= 0) { + mce_apei_read_done = 1; + return rc; + } + rc = -EFAULT; + if (copy_to_user(*ubuf, &m, sizeof(struct mce))) + return rc; + /* + * In fact, we should have cleared the record after that has + * been flushed to the disk or sent to network in + * /sbin/mcelog, but we have no interface to support that now, + * so just clear it to avoid duplication. + */ + rc = apei_clear_mce(record_id); + if (rc) { + mce_apei_read_done = 1; + return rc; + } + *ubuf += sizeof(struct mce); + + return 0; +} + static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) { @@ -1506,15 +1552,19 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, return -ENOMEM; mutex_lock(&mce_read_mutex); + + if (!mce_apei_read_done) { + err = __mce_read_apei(&buf, usize); + if (err || buf != ubuf) + goto out; + } + next = rcu_dereference_check_mce(mcelog.next); /* Only supports full reads right now */ - if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { - mutex_unlock(&mce_read_mutex); - kfree(cpu_tsc); - - return -EINVAL; - } + err = -EINVAL; + if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) + goto out; err = 0; prev = 0; @@ -1562,10 +1612,15 @@ timeout: memset(&mcelog.entry[i], 0, sizeof(struct mce)); } } + + if (err) + err = -EFAULT; + +out: mutex_unlock(&mce_read_mutex); kfree(cpu_tsc); - return err ? -EFAULT : buf - ubuf; + return err ? err : buf - ubuf; } static unsigned int mce_poll(struct file *file, poll_table *wait) @@ -1573,6 +1628,8 @@ static unsigned int mce_poll(struct file *file, poll_table *wait) poll_wait(file, &mce_wait, wait); if (rcu_dereference_check_mce(mcelog.next)) return POLLIN | POLLRDNORM; + if (!mce_apei_read_done && apei_check_mce()) + return POLLIN | POLLRDNORM; return 0; } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 81c499eceb21..e1a0a3bf9716 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -190,7 +190,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, mutex_unlock(&therm_cpu_lock); break; } - return err ? 
NOTIFY_BAD : NOTIFY_OK; + return notifier_from_errno(err); } static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fd4db0db3708..c77586061bcb 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1717,7 +1717,11 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski */ regs->bp = rewind_frame_pointer(skip + 1); regs->cs = __KERNEL_CS; - local_save_flags(regs->flags); + /* + * We abuse bit 3 to pass exact information, see perf_misc_flags + * and the comment with PERF_EFLAGS_EXACT. + */ + regs->flags = 0; } unsigned long perf_instruction_pointer(struct pt_regs *regs) diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 424fc8de68e4..ae85d69644d1 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -465,15 +465,21 @@ out: return rc; } -static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) +static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) { - unsigned long dummy; + int overflow = 0; + u32 low, high; - rdmsrl(hwc->config_base + hwc->idx, dummy); - if (dummy & P4_CCCR_OVF) { + rdmsr(hwc->config_base + hwc->idx, low, high); + + /* we need to check high bit for unflagged overflows */ + if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) { + overflow = 1; (void)checking_wrmsrl(hwc->config_base + hwc->idx, - ((u64)dummy) & ~P4_CCCR_OVF); + ((u64)low) & ~P4_CCCR_OVF); } + + return overflow; } static inline void p4_pmu_disable_event(struct perf_event *event) @@ -584,21 +590,15 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) WARN_ON_ONCE(hwc->idx != idx); - /* - * FIXME: Redundant call, actually not needed - * but just to check if we're screwed - */ - p4_pmu_clear_cccr_ovf(hwc); + /* it might be unflagged overflow */ + handled = p4_pmu_clear_cccr_ovf(hwc); val = x86_perf_event_update(event); - if (val & (1ULL << (x86_pmu.cntval_bits - 1))) + if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) continue; - /* - * event overflow - */ - handled = 1; - data.period = event->hw.last_period; + /* event overflow for sure */ + data.period = event->hw.last_period; if (!x86_perf_event_set_period(event)) continue; @@ -670,7 +670,7 @@ static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu) /* * ESCR address hashing is tricky, ESCRs are not sequential - * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03e0) and + * in memory but all starts from MSR_P4_BSU_ESCR0 (0x03a0) and * the metric between any ESCRs is laid in range [0xa0,0xe1] * * so we make ~70% filled hashtable @@ -735,8 +735,9 @@ static int p4_get_escr_idx(unsigned int addr) { unsigned int idx = P4_ESCR_MSR_IDX(addr); - if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE || - !p4_escr_table[idx])) { + if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE || + !p4_escr_table[idx] || + p4_escr_table[idx] != addr)) { WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr); return -1; } @@ -762,7 +763,7 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign { unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)]; - int cpu = raw_smp_processor_id(); + int cpu = smp_processor_id(); struct hw_perf_event *hwc; struct p4_event_bind *bind; unsigned int i, thread, num; diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 8b862d5900fe..1b7b31ab7d86 100644 --- 
a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -170,7 +170,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, cpuid_device_destroy(cpu); break; } - return err ? NOTIFY_BAD : NOTIFY_OK; + return notifier_from_errno(err); } static struct notifier_block __refdata cpuid_class_cpu_notifier = diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index 3a54dcb9cd0e..43e9ccf44947 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task); /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, * no more per-task TSS's. The TSS size is kept cacheline-aligned - * so they are allowed to end up in the .data.cacheline_aligned + * so they are allowed to end up in the .data..cacheline_aligned * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 4d4468e9f47c..7bf2dc4c8f70 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -230,7 +230,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, msr_device_destroy(cpu); break; } - return err ? NOTIFY_BAD : NOTIFY_OK; + return notifier_from_errno(err); } static struct notifier_block __refdata msr_class_cpu_notifier = { diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 7d2829dde20e..a5bc528d4328 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -31,8 +31,6 @@ static struct dma_map_ops swiotlb_dma_ops = { .free_coherent = swiotlb_free_coherent, .sync_single_for_cpu = swiotlb_sync_single_for_cpu, .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, - .sync_single_range_for_device = swiotlb_sync_single_range_for_device, .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .map_sg = swiotlb_map_sg_attrs, diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e8029896309a..b4ae4acbd031 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), }, }, + /* + * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so + * match on the product name. + */ + { + .callback = dmi_low_memory_corruption, + .ident = "Phoenix BIOS", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + }, #endif {} }; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ef6370b00e70..cf592767812e 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -247,7 +247,7 @@ void __init setup_per_cpu_areas(void) #endif #endif /* - * Up to this point, the boot CPU has been using .data.init + * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. 
*/ if (cpu == boot_cpu_id) @@ -265,10 +265,10 @@ void __init setup_per_cpu_areas(void) #if defined(CONFIG_X86_64) && defined(CONFIG_NUMA) /* - * make sure boot cpu node_number is right, when boot cpu is on the + * make sure boot cpu numa_node is right, when boot cpu is on the * node that doesn't have mem installed */ - per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id); + set_cpu_numa_node(boot_cpu_id, early_cpu_to_node(boot_cpu_id)); #endif /* Setup node to cpumask map */ diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 763d815e27a0..37462f1ddba5 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1215,9 +1215,17 @@ __init void prefill_possible_map(void) if (!num_processors) num_processors = 1; - if (setup_possible_cpus == -1) - possible = num_processors + disabled_cpus; - else + i = setup_max_cpus ?: 1; + if (setup_possible_cpus == -1) { + possible = num_processors; +#ifdef CONFIG_HOTPLUG_CPU + if (setup_max_cpus) + possible += disabled_cpus; +#else + if (possible > i) + possible = i; +#endif + } else possible = setup_possible_cpus; total_cpus = max_t(int, possible, num_processors + disabled_cpus); @@ -1230,11 +1238,23 @@ __init void prefill_possible_map(void) possible = nr_cpu_ids; } +#ifdef CONFIG_HOTPLUG_CPU + if (!setup_max_cpus) +#endif + if (possible > i) { + printk(KERN_WARNING + "%d Processors exceeds max_cpus limit of %u\n", + possible, setup_max_cpus); + possible = i; + } + printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max_t(int, possible - num_processors, 0)); for (i = 0; i < possible; i++) set_cpu_possible(i, true); + for (; i < NR_CPUS; i++) + set_cpu_possible(i, false); nr_cpu_ids = possible; } diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2cc249718c46..d0bb52296fa3 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -97,7 +97,7 @@ SECTIONS HEAD_TEXT #ifdef CONFIG_X86_32 . = ALIGN(PAGE_SIZE); - *(.text.page_aligned) + *(.text..page_aligned) #endif . = ALIGN(8); _stext = .; @@ -305,7 +305,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; - *(.bss.page_aligned) + *(.bss..page_aligned) *(.bss) . 
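To make the prefill_possible_map() change above concrete (illustrative numbers, not taken from a changelog): on a CONFIG_HOTPLUG_CPU=y system with 2 enumerated processors and 2 disabled hotpluggable CPUs, a normal boot still ends up with possible = 2 + 2 = 4, but booting with maxcpus=0 now skips the disabled_cpus term and clamps possible to 1, so per-CPU state is only set aside for the boot CPU and the remaining IDs are explicitly marked not possible.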
= ALIGN(4); __bss_stop = .; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 2bdf628066bd..9257510b4836 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1390,7 +1390,6 @@ __init void lguest_init(void) #endif #ifdef CONFIG_ACPI acpi_disabled = 1; - acpi_ht = 0; #endif /* diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 8948f47fde05..a7bcc23ef96c 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -33,9 +33,6 @@ int numa_off __initdata; static unsigned long __initdata nodemap_addr; static unsigned long __initdata nodemap_size; -DEFINE_PER_CPU(int, node_number) = 0; -EXPORT_PER_CPU_SYMBOL(node_number); - /* * Map cpu index to node index */ @@ -809,7 +806,7 @@ void __cpuinit numa_set_node(int cpu, int node) per_cpu(x86_cpu_to_node_map, cpu) = node; if (node != NUMA_NO_NODE) - per_cpu(node_number, cpu) = node; + set_cpu_numa_node(cpu, node); } void __cpuinit numa_clear_node(int cpu) @@ -867,7 +864,7 @@ void __cpuinit numa_remove_cpu(int cpu) numa_set_cpumask(cpu, 0); } -int cpu_to_node(int cpu) +int __cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) { printk(KERN_WARNING @@ -877,7 +874,7 @@ int cpu_to_node(int cpu) } return per_cpu(x86_cpu_to_node_map, cpu); } -EXPORT_SYMBOL(cpu_to_node); +EXPORT_SYMBOL(__cpu_to_node); /* * Same function as cpu_to_node() but used if called before the diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index bbe5502ee1cb..acc15b23b743 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -336,6 +336,7 @@ int free_memtype(u64 start, u64 end) { int err = -EINVAL; int is_range_ram; + struct memtype *entry; if (!pat_enabled) return 0; @@ -355,17 +356,20 @@ int free_memtype(u64 start, u64 end) } spin_lock(&memtype_lock); - err = rbt_memtype_erase(start, end); + entry = rbt_memtype_erase(start, end); spin_unlock(&memtype_lock); - if (err) { + if (!entry) { printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", current->comm, current->pid, start, end); + return -EINVAL; } + kfree(entry); + dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); - return err; + return 0; } diff --git a/arch/x86/mm/pat_internal.h b/arch/x86/mm/pat_internal.h index 4f39eefa3e61..77e5ba153fac 100644 --- a/arch/x86/mm/pat_internal.h +++ b/arch/x86/mm/pat_internal.h @@ -28,15 +28,15 @@ static inline char *cattr_name(unsigned long flags) #ifdef CONFIG_X86_PAT extern int rbt_memtype_check_insert(struct memtype *new, unsigned long *new_type); -extern int rbt_memtype_erase(u64 start, u64 end); +extern struct memtype *rbt_memtype_erase(u64 start, u64 end); extern struct memtype *rbt_memtype_lookup(u64 addr); extern int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos); #else static inline int rbt_memtype_check_insert(struct memtype *new, unsigned long *new_type) { return 0; } -static inline int rbt_memtype_erase(u64 start, u64 end) -{ return 0; } +static inline struct memtype *rbt_memtype_erase(u64 start, u64 end) +{ return NULL; } static inline struct memtype *rbt_memtype_lookup(u64 addr) { return NULL; } static inline int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos) diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c index 07de4cb8cc30..f537087bb740 100644 --- a/arch/x86/mm/pat_rbtree.c +++ b/arch/x86/mm/pat_rbtree.c @@ -231,16 +231,17 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type) return err; } -int rbt_memtype_erase(u64 start, u64 end) +struct memtype *rbt_memtype_erase(u64 start, u64 end) { struct memtype *data; data = 
memtype_rb_exact_match(&memtype_rbroot, start, end); if (!data) - return -EINVAL; + goto out; rb_erase(&data->rb, &memtype_rbroot); - return 0; +out: + return data; } struct memtype *rbt_memtype_lookup(u64 addr) diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c index df3d5c861cda..308e32570d84 100644 --- a/arch/x86/mm/pf_in.c +++ b/arch/x86/mm/pf_in.c @@ -34,7 +34,7 @@ /* IA32 Manual 3, 2-1 */ static unsigned char prefix_codes[] = { 0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64, - 0x65, 0x2E, 0x3E, 0x66, 0x67 + 0x65, 0x66, 0x67 }; /* IA32 Manual 3, 3-432*/ static unsigned int reg_rop[] = { diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 792854003ed3..cac718499256 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 31930fd30ea9..2ec04c424a62 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -207,10 +207,9 @@ get_current_resources(struct acpi_device *device, int busnum, if (!info.res) goto res_alloc_fail; - info.name = kmalloc(16, GFP_KERNEL); + info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum); if (!info.name) goto name_alloc_fail; - sprintf(info.name, "PCI Bus %04x:%02x", domain, busnum); info.res_num = 0; acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, @@ -224,8 +223,11 @@ res_alloc_fail: return; } -struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int domain, int busnum) +struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root) { + struct acpi_device *device = root->device; + int domain = root->segment; + int busnum = root->secondary.start; struct pci_bus *bus; struct pci_sysdata *sd; int node; diff --git a/arch/xtensa/include/asm/scatterlist.h b/arch/xtensa/include/asm/scatterlist.h index 810080bb0a2b..b1f9fdc1d5ba 100644 --- a/arch/xtensa/include/asm/scatterlist.h +++ b/arch/xtensa/include/asm/scatterlist.h @@ -11,28 +11,7 @@ #ifndef _XTENSA_SCATTERLIST_H #define _XTENSA_SCATTERLIST_H -#include - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. 
- */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - +#include #define ISA_DMA_THRESHOLD (~0UL) diff --git a/drivers/Makefile b/drivers/Makefile index f42a03029b7c..91874e048552 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ obj-y += video/ +obj-y += idle/ obj-$(CONFIG_ACPI) += acpi/ obj-$(CONFIG_SFI) += sfi/ # PnP must come after ACPI since it will eventually need to check if acpi @@ -91,7 +92,6 @@ obj-$(CONFIG_EISA) += eisa/ obj-y += lguest/ obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_IDLE) += cpuidle/ -obj-y += idle/ obj-$(CONFIG_MMC) += mmc/ obj-$(CONFIG_MEMSTICK) += memstick/ obj-$(CONFIG_NEW_LEDS) += leds/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 93d2c7971df6..746411518802 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -360,4 +360,13 @@ config ACPI_SBS To compile this driver as a module, choose M here: the modules will be called sbs and sbshc. +config ACPI_HED + tristate "Hardware Error Device" + help + This driver supports the Hardware Error Device (PNP0C33), + which is used to report some hardware errors notified via + SCI, mainly the corrected errors. + +source "drivers/acpi/apei/Kconfig" + endif # ACPI diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index a8d8998dd5c5..6ee33169e1dc 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -19,7 +19,7 @@ obj-y += acpi.o \ # All the builtin files are in the "acpi." module_param namespace. acpi-y += osl.o utils.o reboot.o -acpi-y += hest.o +acpi-y += atomicio.o # sleep related files acpi-y += wakeup.o @@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o obj-$(CONFIG_ACPI_SBS) += sbshc.o obj-$(CONFIG_ACPI_SBS) += sbs.o obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o +obj-$(CONFIG_ACPI_HED) += hed.o # processor has its own "processor." 
module_param namespace processor-y := processor_driver.o processor_throttling.o @@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o + +obj-$(CONFIG_ACPI_APEI) += apei/ diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index 62122134693b..d269a8f3329c 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock); #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) #define CPUID5_ECX_INTERRUPT_BREAK (0x2) static unsigned long power_saving_mwait_eax; + +static unsigned char tsc_detected_unstable; +static unsigned char tsc_marked_unstable; + static void power_saving_mwait_init(void) { unsigned int eax, ebx, ecx, edx; @@ -87,8 +91,8 @@ static void power_saving_mwait_init(void) /*FALL THROUGH*/ default: - /* TSC could halt in idle, so notify users */ - mark_tsc_unstable("TSC halts in idle"); + /* TSC could halt in idle */ + tsc_detected_unstable = 1; } #endif } @@ -168,16 +172,14 @@ static int power_saving_thread(void *data) do_sleep = 0; - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we test - * NEED_RESCHED: - */ - smp_mb(); - expire_time = jiffies + HZ * (100 - idle_pct) / 100; while (!need_resched()) { + if (tsc_detected_unstable && !tsc_marked_unstable) { + /* TSC could halt in idle, so notify users */ + mark_tsc_unstable("TSC halts in idle"); + tsc_marked_unstable = 1; + } local_irq_disable(); cpu = smp_processor_id(); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, @@ -200,8 +202,6 @@ static int power_saving_thread(void *data) } } - current_thread_info()->status |= TS_POLLING; - /* * current sched_rt has threshold for rt task running time. 
* When a rt task uses 95% CPU time, the rt thread will be diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 7c7bbb4d402c..d5a5efc043bf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, acpi_status acpi_enable(void) { - acpi_status status = AE_OK; + acpi_status status; ACPI_FUNCTION_TRACE(acpi_enable); @@ -84,21 +84,30 @@ acpi_status acpi_enable(void) if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in ACPI mode\n")); - } else { - /* Transition to ACPI mode */ + return_ACPI_STATUS(AE_OK); + } - status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); - if (ACPI_FAILURE(status)) { - ACPI_ERROR((AE_INFO, - "Could not transition to ACPI mode")); - return_ACPI_STATUS(status); - } + /* Transition to ACPI mode */ - ACPI_DEBUG_PRINT((ACPI_DB_INIT, - "Transition to ACPI mode successful\n")); + status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); + if (ACPI_FAILURE(status)) { + ACPI_ERROR((AE_INFO, + "Could not transition to ACPI mode")); + return_ACPI_STATUS(status); } - return_ACPI_STATUS(status); + /* Sanity check that transition succeeded */ + + if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) { + ACPI_ERROR((AE_INFO, + "Hardware did not enter ACPI mode")); + return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); + } + + ACPI_DEBUG_PRINT((ACPI_DB_INIT, + "Transition to ACPI mode successful\n")); + + return_ACPI_STATUS(AE_OK); } ACPI_EXPORT_SYMBOL(acpi_enable) diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 679a112a7d26..b44274a0b62c 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode) { acpi_status status; - u32 retry; ACPI_FUNCTION_TRACE(hw_set_mode); @@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode) return_ACPI_STATUS(status); } - /* - * Some hardware takes a LONG time to switch modes. Give them 3 sec to - * do so, but allow faster systems to proceed more quickly. - */ - retry = 3000; - while (retry) { - if (acpi_hw_get_mode() == mode) { - ACPI_DEBUG_PRINT((ACPI_DB_INFO, - "Mode %X successfully enabled\n", - mode)); - return_ACPI_STATUS(AE_OK); - } - acpi_os_stall(1000); - retry--; - } - - ACPI_ERROR((AE_INFO, "Hardware did not change modes")); - return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); + return_ACPI_STATUS(AE_OK); } /******************************************************************************* diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig new file mode 100644 index 000000000000..f8c668f27b5a --- /dev/null +++ b/drivers/acpi/apei/Kconfig @@ -0,0 +1,30 @@ +config ACPI_APEI + bool "ACPI Platform Error Interface (APEI)" + depends on X86 + help + APEI allows to report errors (for example from the chipset) + to the operating system. This improves NMI handling + especially. In addition it supports error serialization and + error injection. + +config ACPI_APEI_GHES + tristate "APEI Generic Hardware Error Source" + depends on ACPI_APEI && X86 + select ACPI_HED + help + Generic Hardware Error Source provides a way to report + platform hardware errors (such as that from chipset). It + works in so called "Firmware First" mode, that is, hardware + errors are reported to firmware firstly, then reported to + Linux by firmware. 
This way, some non-standard hardware + error registers or non-standard hardware link can be checked + by firmware to produce more valuable hardware error + information for Linux. + +config ACPI_APEI_EINJ + tristate "APEI Error INJection (EINJ)" + depends on ACPI_APEI && DEBUG_FS + help + EINJ provides a hardware error injection mechanism, it is + mainly used for debugging and testing the other parts of + APEI and some other RAS features. diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile new file mode 100644 index 000000000000..b13b03a17789 --- /dev/null +++ b/drivers/acpi/apei/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_ACPI_APEI) += apei.o +obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o +obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o + +apei-y := apei-base.o hest.o cper.o erst.o diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c new file mode 100644 index 000000000000..db3946e9c66b --- /dev/null +++ b/drivers/acpi/apei/apei-base.c @@ -0,0 +1,593 @@ +/* + * apei-base.c - ACPI Platform Error Interface (APEI) supporting + * infrastructure + * + * APEI allows to report errors (for example from the chipset) to the + * the operating system. This improves NMI handling especially. In + * addition it supports error serialization and error injection. + * + * For more information about APEI, please refer to ACPI Specification + * version 4.0, chapter 17. + * + * This file has Common functions used by more than one APEI table, + * including framework of interpreter for ERST and EINJ; resource + * management for APEI registers. + * + * Copyright (C) 2009, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apei-internal.h" + +#define APEI_PFX "APEI: " + +/* + * APEI ERST (Error Record Serialization Table) and EINJ (Error + * INJection) interpreter framework. 
+ */ + +#define APEI_EXEC_PRESERVE_REGISTER 0x1 + +void apei_exec_ctx_init(struct apei_exec_context *ctx, + struct apei_exec_ins_type *ins_table, + u32 instructions, + struct acpi_whea_header *action_table, + u32 entries) +{ + ctx->ins_table = ins_table; + ctx->instructions = instructions; + ctx->action_table = action_table; + ctx->entries = entries; +} +EXPORT_SYMBOL_GPL(apei_exec_ctx_init); + +int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val) +{ + int rc; + + rc = acpi_atomic_read(val, &entry->register_region); + if (rc) + return rc; + *val >>= entry->register_region.bit_offset; + *val &= entry->mask; + + return 0; +} + +int apei_exec_read_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val = 0; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + ctx->value = val; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_read_register); + +int apei_exec_read_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + + rc = apei_exec_read_register(ctx, entry); + if (rc) + return rc; + ctx->value = (ctx->value == entry->value); + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_read_register_value); + +int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val) +{ + int rc; + + val &= entry->mask; + val <<= entry->register_region.bit_offset; + if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) { + u64 valr = 0; + rc = acpi_atomic_read(&valr, &entry->register_region); + if (rc) + return rc; + valr &= ~(entry->mask << entry->register_region.bit_offset); + val |= valr; + } + rc = acpi_atomic_write(val, &entry->register_region); + + return rc; +} + +int apei_exec_write_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_write_register(entry, ctx->value); +} +EXPORT_SYMBOL_GPL(apei_exec_write_register); + +int apei_exec_write_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + + ctx->value = entry->value; + rc = apei_exec_write_register(ctx, entry); + + return rc; +} +EXPORT_SYMBOL_GPL(apei_exec_write_register_value); + +int apei_exec_noop(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_noop); + +/* + * Interpret the specified action. Go through whole action table, + * execute all instructions belong to the action. + */ +int apei_exec_run(struct apei_exec_context *ctx, u8 action) +{ + int rc; + u32 i, ip; + struct acpi_whea_header *entry; + apei_exec_ins_func_t run; + + ctx->ip = 0; + + /* + * "ip" is the instruction pointer of current instruction, + * "ctx->ip" specifies the next instruction to executed, + * instruction "run" function may change the "ctx->ip" to + * implement "goto" semantics. 
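+ * A backward change of "ctx->ip" (a "goto" to an earlier instruction)
+ * is handled by the "rewind" label below: the instructions of this
+ * action are scanned again from the beginning until the position given
+ * by the new "ctx->ip" is reached.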
+ */ +rewind: + ip = 0; + for (i = 0; i < ctx->entries; i++) { + entry = &ctx->action_table[i]; + if (entry->action != action) + continue; + if (ip == ctx->ip) { + if (entry->instruction >= ctx->instructions || + !ctx->ins_table[entry->instruction].run) { + pr_warning(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + entry->instruction); + return -EINVAL; + } + run = ctx->ins_table[entry->instruction].run; + rc = run(ctx, entry); + if (rc < 0) + return rc; + else if (rc != APEI_EXEC_SET_IP) + ctx->ip++; + } + ip++; + if (ctx->ip < ip) + goto rewind; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_exec_run); + +typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data); + +static int apei_exec_for_each_entry(struct apei_exec_context *ctx, + apei_exec_entry_func_t func, + void *data, + int *end) +{ + u8 ins; + int i, rc; + struct acpi_whea_header *entry; + struct apei_exec_ins_type *ins_table = ctx->ins_table; + + for (i = 0; i < ctx->entries; i++) { + entry = ctx->action_table + i; + ins = entry->instruction; + if (end) + *end = i; + if (ins >= ctx->instructions || !ins_table[ins].run) { + pr_warning(FW_WARN APEI_PFX + "Invalid action table, unknown instruction type: %d\n", + ins); + return -EINVAL; + } + rc = func(ctx, entry, data); + if (rc) + return rc; + } + + return 0; +} + +static int pre_map_gar_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + u8 ins = entry->instruction; + + if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) + return acpi_pre_map_gar(&entry->register_region); + + return 0; +} + +/* + * Pre-map all GARs in action table to make it possible to access them + * in NMI handler. + */ +int apei_exec_pre_map_gars(struct apei_exec_context *ctx) +{ + int rc, end; + + rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback, + NULL, &end); + if (rc) { + struct apei_exec_context ctx_unmap; + memcpy(&ctx_unmap, ctx, sizeof(*ctx)); + ctx_unmap.entries = end; + apei_exec_post_unmap_gars(&ctx_unmap); + } + + return rc; +} +EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars); + +static int post_unmap_gar_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + u8 ins = entry->instruction; + + if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) + acpi_post_unmap_gar(&entry->register_region); + + return 0; +} + +/* Post-unmap all GAR in action table. 
*/ +int apei_exec_post_unmap_gars(struct apei_exec_context *ctx) +{ + return apei_exec_for_each_entry(ctx, post_unmap_gar_callback, + NULL, NULL); +} +EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars); + +/* + * Resource management for GARs in APEI + */ +struct apei_res { + struct list_head list; + unsigned long start; + unsigned long end; +}; + +/* Collect all resources requested, to avoid conflict */ +struct apei_resources apei_resources_all = { + .iomem = LIST_HEAD_INIT(apei_resources_all.iomem), + .ioport = LIST_HEAD_INIT(apei_resources_all.ioport), +}; + +static int apei_res_add(struct list_head *res_list, + unsigned long start, unsigned long size) +{ + struct apei_res *res, *resn, *res_ins = NULL; + unsigned long end = start + size; + + if (end <= start) + return 0; +repeat: + list_for_each_entry_safe(res, resn, res_list, list) { + if (res->start > end || res->end < start) + continue; + else if (end <= res->end && start >= res->start) { + kfree(res_ins); + return 0; + } + list_del(&res->list); + res->start = start = min(res->start, start); + res->end = end = max(res->end, end); + kfree(res_ins); + res_ins = res; + goto repeat; + } + + if (res_ins) + list_add(&res_ins->list, res_list); + else { + res_ins = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res_ins) + return -ENOMEM; + res_ins->start = start; + res_ins->end = end; + list_add(&res_ins->list, res_list); + } + + return 0; +} + +static int apei_res_sub(struct list_head *res_list1, + struct list_head *res_list2) +{ + struct apei_res *res1, *resn1, *res2, *res; + res1 = list_entry(res_list1->next, struct apei_res, list); + resn1 = list_entry(res1->list.next, struct apei_res, list); + while (&res1->list != res_list1) { + list_for_each_entry(res2, res_list2, list) { + if (res1->start >= res2->end || + res1->end <= res2->start) + continue; + else if (res1->end <= res2->end && + res1->start >= res2->start) { + list_del(&res1->list); + kfree(res1); + break; + } else if (res1->end > res2->end && + res1->start < res2->start) { + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + res->start = res2->end; + res->end = res1->end; + res1->end = res2->start; + list_add(&res->list, &res1->list); + resn1 = res; + } else { + if (res1->start < res2->start) + res1->end = res2->start; + else + res1->start = res2->end; + } + } + res1 = resn1; + resn1 = list_entry(resn1->list.next, struct apei_res, list); + } + + return 0; +} + +static void apei_res_clean(struct list_head *res_list) +{ + struct apei_res *res, *resn; + + list_for_each_entry_safe(res, resn, res_list, list) { + list_del(&res->list); + kfree(res); + } +} + +void apei_resources_fini(struct apei_resources *resources) +{ + apei_res_clean(&resources->iomem); + apei_res_clean(&resources->ioport); +} +EXPORT_SYMBOL_GPL(apei_resources_fini); + +static int apei_resources_merge(struct apei_resources *resources1, + struct apei_resources *resources2) +{ + int rc; + struct apei_res *res; + + list_for_each_entry(res, &resources2->iomem, list) { + rc = apei_res_add(&resources1->iomem, res->start, + res->end - res->start); + if (rc) + return rc; + } + list_for_each_entry(res, &resources2->ioport, list) { + rc = apei_res_add(&resources1->ioport, res->start, + res->end - res->start); + if (rc) + return rc; + } + + return 0; +} + +/* + * EINJ has two groups of GARs (EINJ table entry and trigger table + * entry), so common resources are subtracted from the trigger table + * resources before the second requesting. 
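+ * Note that apei_resources_request() below also subtracts everything
+ * already collected in apei_resources_all before requesting, so a
+ * range that has been requested once is not requested again.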
+ */ +int apei_resources_sub(struct apei_resources *resources1, + struct apei_resources *resources2) +{ + int rc; + + rc = apei_res_sub(&resources1->iomem, &resources2->iomem); + if (rc) + return rc; + return apei_res_sub(&resources1->ioport, &resources2->ioport); +} +EXPORT_SYMBOL_GPL(apei_resources_sub); + +/* + * IO memory/port rersource management mechanism is used to check + * whether memory/port area used by GARs conflicts with normal memory + * or IO memory/port of devices. + */ +int apei_resources_request(struct apei_resources *resources, + const char *desc) +{ + struct apei_res *res, *res_bak; + struct resource *r; + + apei_resources_sub(resources, &apei_resources_all); + + list_for_each_entry(res, &resources->iomem, list) { + r = request_mem_region(res->start, res->end - res->start, + desc); + if (!r) { + pr_err(APEI_PFX + "Can not request iomem region <%016llx-%016llx> for GARs.\n", + (unsigned long long)res->start, + (unsigned long long)res->end); + res_bak = res; + goto err_unmap_iomem; + } + } + + list_for_each_entry(res, &resources->ioport, list) { + r = request_region(res->start, res->end - res->start, desc); + if (!r) { + pr_err(APEI_PFX + "Can not request ioport region <%016llx-%016llx> for GARs.\n", + (unsigned long long)res->start, + (unsigned long long)res->end); + res_bak = res; + goto err_unmap_ioport; + } + } + + apei_resources_merge(&apei_resources_all, resources); + + return 0; +err_unmap_ioport: + list_for_each_entry(res, &resources->ioport, list) { + if (res == res_bak) + break; + release_mem_region(res->start, res->end - res->start); + } + res_bak = NULL; +err_unmap_iomem: + list_for_each_entry(res, &resources->iomem, list) { + if (res == res_bak) + break; + release_region(res->start, res->end - res->start); + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(apei_resources_request); + +void apei_resources_release(struct apei_resources *resources) +{ + struct apei_res *res; + + list_for_each_entry(res, &resources->iomem, list) + release_mem_region(res->start, res->end - res->start); + list_for_each_entry(res, &resources->ioport, list) + release_region(res->start, res->end - res->start); + + apei_resources_sub(&apei_resources_all, resources); +} +EXPORT_SYMBOL_GPL(apei_resources_release); + +static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr) +{ + u32 width, space_id; + + width = reg->bit_width; + space_id = reg->space_id; + /* Handle possible alignment issues */ + memcpy(paddr, ®->address, sizeof(*paddr)); + if (!*paddr) { + pr_warning(FW_BUG APEI_PFX + "Invalid physical address in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { + pr_warning(FW_BUG APEI_PFX + "Invalid bit width in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && + space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + pr_warning(FW_BUG APEI_PFX + "Invalid address space type in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + return 0; +} + +static int collect_res_callback(struct apei_exec_context *ctx, + struct acpi_whea_header *entry, + void *data) +{ + struct apei_resources *resources = data; + struct acpi_generic_address *reg = &entry->register_region; + u8 ins = entry->instruction; + u64 paddr; + int rc; + + if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)) + return 0; + + rc = apei_check_gar(reg, &paddr); + if (rc) + return rc; + + switch (reg->space_id) { + case 
ACPI_ADR_SPACE_SYSTEM_MEMORY: + return apei_res_add(&resources->iomem, paddr, + reg->bit_width / 8); + case ACPI_ADR_SPACE_SYSTEM_IO: + return apei_res_add(&resources->ioport, paddr, + reg->bit_width / 8); + default: + return -EINVAL; + } +} + +/* + * Same register may be used by multiple instructions in GARs, so + * resources are collected before requesting. + */ +int apei_exec_collect_resources(struct apei_exec_context *ctx, + struct apei_resources *resources) +{ + return apei_exec_for_each_entry(ctx, collect_res_callback, + resources, NULL); +} +EXPORT_SYMBOL_GPL(apei_exec_collect_resources); + +struct dentry *apei_get_debugfs_dir(void) +{ + static struct dentry *dapei; + + if (!dapei) + dapei = debugfs_create_dir("apei", NULL); + + return dapei; +} +EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h new file mode 100644 index 000000000000..18df1e940276 --- /dev/null +++ b/drivers/acpi/apei/apei-internal.h @@ -0,0 +1,114 @@ +/* + * apei-internal.h - ACPI Platform Error Interface internal + * definations. + */ + +#ifndef APEI_INTERNAL_H +#define APEI_INTERNAL_H + +#include + +struct apei_exec_context; + +typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); + +#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001 + +struct apei_exec_ins_type { + u32 flags; + apei_exec_ins_func_t run; +}; + +struct apei_exec_context { + u32 ip; + u64 value; + u64 var1; + u64 var2; + u64 src_base; + u64 dst_base; + struct apei_exec_ins_type *ins_table; + u32 instructions; + struct acpi_whea_header *action_table; + u32 entries; +}; + +void apei_exec_ctx_init(struct apei_exec_context *ctx, + struct apei_exec_ins_type *ins_table, + u32 instructions, + struct acpi_whea_header *action_table, + u32 entries); + +static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx, + u64 input) +{ + ctx->value = input; +} + +static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx) +{ + return ctx->value; +} + +int apei_exec_run(struct apei_exec_context *ctx, u8 action); + +/* Common instruction implementation */ + +/* IP has been set in instruction function */ +#define APEI_EXEC_SET_IP 1 + +int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val); +int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val); +int apei_exec_read_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_read_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_write_register(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_write_register_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_noop(struct apei_exec_context *ctx, + struct acpi_whea_header *entry); +int apei_exec_pre_map_gars(struct apei_exec_context *ctx); +int apei_exec_post_unmap_gars(struct apei_exec_context *ctx); + +struct apei_resources { + struct list_head iomem; + struct list_head ioport; +}; + +static inline void apei_resources_init(struct apei_resources *resources) +{ + INIT_LIST_HEAD(&resources->iomem); + INIT_LIST_HEAD(&resources->ioport); +} + +void apei_resources_fini(struct apei_resources *resources); +int apei_resources_sub(struct apei_resources *resources1, + struct apei_resources *resources2); +int apei_resources_request(struct apei_resources *resources, + const char *desc); +void apei_resources_release(struct apei_resources *resources); +int 
apei_exec_collect_resources(struct apei_exec_context *ctx, + struct apei_resources *resources); + +struct dentry; +struct dentry *apei_get_debugfs_dir(void); + +#define apei_estatus_for_each_section(estatus, section) \ + for (section = (struct acpi_hest_generic_data *)(estatus + 1); \ + (void *)section - (void *)estatus < estatus->data_length; \ + section = (void *)(section+1) + section->error_data_length) + +static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus) +{ + if (estatus->raw_data_length) + return estatus->raw_data_offset + \ + estatus->raw_data_length; + else + return sizeof(*estatus) + estatus->data_length; +} + +int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); +int apei_estatus_check(const struct acpi_hest_generic_status *estatus); +#endif diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c new file mode 100644 index 000000000000..f4cf2fc4c8c1 --- /dev/null +++ b/drivers/acpi/apei/cper.c @@ -0,0 +1,84 @@ +/* + * UEFI Common Platform Error Record (CPER) support + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying + * + * CPER is the format used to describe platform hardware error by + * various APEI tables, such as ERST, BERT and HEST etc. + * + * For more information about CPER, please refer to Appendix N of UEFI + * Specification version 2.3. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +/* + * CPER record ID need to be unique even after reboot, because record + * ID is used as index for ERST storage, while CPER records from + * multiple boot may co-exist in ERST. 
+ */ +u64 cper_next_record_id(void) +{ + static atomic64_t seq; + + if (!atomic64_read(&seq)) + atomic64_set(&seq, ((u64)get_seconds()) << 32); + + return atomic64_inc_return(&seq); +} +EXPORT_SYMBOL_GPL(cper_next_record_id); + +int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) +{ + if (estatus->data_length && + estatus->data_length < sizeof(struct acpi_hest_generic_data)) + return -EINVAL; + if (estatus->raw_data_length && + estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_estatus_check_header); + +int apei_estatus_check(const struct acpi_hest_generic_status *estatus) +{ + struct acpi_hest_generic_data *gdata; + unsigned int data_len, gedata_len; + int rc; + + rc = apei_estatus_check_header(estatus); + if (rc) + return rc; + data_len = estatus->data_length; + gdata = (struct acpi_hest_generic_data *)(estatus + 1); + while (data_len > sizeof(*gdata)) { + gedata_len = gdata->error_data_length; + if (gedata_len > data_len - sizeof(*gdata)) + return -EINVAL; + data_len -= gedata_len + sizeof(*gdata); + } + if (data_len) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(apei_estatus_check); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c new file mode 100644 index 000000000000..465c885938ee --- /dev/null +++ b/drivers/acpi/apei/einj.c @@ -0,0 +1,548 @@ +/* + * APEI Error INJection support + * + * EINJ provides a hardware error injection mechanism, this is useful + * for debugging and testing of other APEI and RAS features. + * + * For more information about EINJ, please refer to ACPI Specification + * version 4.0, section 17.5. + * + * Copyright 2009-2010 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apei-internal.h" + +#define EINJ_PFX "EINJ: " + +#define SPIN_UNIT 100 /* 100ns */ +/* Firmware should respond within 1 miliseconds */ +#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) + +/* + * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the + * EINJ table through an unpublished extension. Use with caution as + * most will ignore the parameter and make their own choice of address + * for error injection. 
+ */ +struct einj_parameter { + u64 type; + u64 reserved1; + u64 reserved2; + u64 param1; + u64 param2; +}; + +#define EINJ_OP_BUSY 0x1 +#define EINJ_STATUS_SUCCESS 0x0 +#define EINJ_STATUS_FAIL 0x1 +#define EINJ_STATUS_INVAL 0x2 + +#define EINJ_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_einj))) + +static struct acpi_table_einj *einj_tab; + +static struct apei_resources einj_resources; + +static struct apei_exec_ins_type einj_ins_type[] = { + [ACPI_EINJ_READ_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_EINJ_READ_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_EINJ_WRITE_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_EINJ_WRITE_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_EINJ_NOOP] = { + .flags = 0, + .run = apei_exec_noop, + }, +}; + +/* + * Prevent EINJ interpreter to run simultaneously, because the + * corresponding firmware implementation may not work properly when + * invoked simultaneously. + */ +static DEFINE_MUTEX(einj_mutex); + +static struct einj_parameter *einj_param; + +static void einj_exec_ctx_init(struct apei_exec_context *ctx) +{ + apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), + EINJ_TAB_ENTRY(einj_tab), einj_tab->entries); +} + +static int __einj_get_available_error_type(u32 *type) +{ + struct apei_exec_context ctx; + int rc; + + einj_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE); + if (rc) + return rc; + *type = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +/* Get error injection capabilities of the platform */ +static int einj_get_available_error_type(u32 *type) +{ + int rc; + + mutex_lock(&einj_mutex); + rc = __einj_get_available_error_type(type); + mutex_unlock(&einj_mutex); + + return rc; +} + +static int einj_timedout(u64 *t) +{ + if ((s64)*t < SPIN_UNIT) { + pr_warning(FW_WARN EINJ_PFX + "Firmware does not respond in time\n"); + return 1; + } + *t -= SPIN_UNIT; + ndelay(SPIN_UNIT); + touch_nmi_watchdog(); + return 0; +} + +static u64 einj_get_parameter_address(void) +{ + int i; + u64 paddr = 0; + struct acpi_whea_header *entry; + + entry = EINJ_TAB_ENTRY(einj_tab); + for (i = 0; i < einj_tab->entries; i++) { + if (entry->action == ACPI_EINJ_SET_ERROR_TYPE && + entry->instruction == ACPI_EINJ_WRITE_REGISTER && + entry->register_region.space_id == + ACPI_ADR_SPACE_SYSTEM_MEMORY) + memcpy(&paddr, &entry->register_region.address, + sizeof(paddr)); + entry++; + } + + return paddr; +} + +/* do sanity check to trigger table */ +static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) +{ + if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) + return -EINVAL; + if (trigger_tab->table_size > PAGE_SIZE || + trigger_tab->table_size <= trigger_tab->header_size) + return -EINVAL; + if (trigger_tab->entry_count != + (trigger_tab->table_size - trigger_tab->header_size) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +/* Execute instructions in trigger error action table */ +static int __einj_error_trigger(u64 trigger_paddr) +{ + struct acpi_einj_trigger *trigger_tab = NULL; + struct apei_exec_context trigger_ctx; + struct apei_resources trigger_resources; + struct acpi_whea_header *trigger_entry; + struct resource *r; + u32 table_size; + int rc = -EIO; + + r = 
request_mem_region(trigger_paddr, sizeof(*trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err(EINJ_PFX + "Can not request iomem region <%016llx-%016llx> for Trigger table.\n", + (unsigned long long)trigger_paddr, + (unsigned long long)trigger_paddr+sizeof(*trigger_tab)); + goto out; + } + trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab)); + if (!trigger_tab) { + pr_err(EINJ_PFX "Failed to map trigger table!\n"); + goto out_rel_header; + } + rc = einj_check_trigger_header(trigger_tab); + if (rc) { + pr_warning(FW_BUG EINJ_PFX + "The trigger error action table is invalid\n"); + goto out_rel_header; + } + rc = -EIO; + table_size = trigger_tab->table_size; + r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), + table_size - sizeof(*trigger_tab), + "APEI EINJ Trigger Table"); + if (!r) { + pr_err(EINJ_PFX +"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n", + (unsigned long long)trigger_paddr+sizeof(*trigger_tab), + (unsigned long long)trigger_paddr + table_size); + goto out_rel_header; + } + iounmap(trigger_tab); + trigger_tab = ioremap_cache(trigger_paddr, table_size); + if (!trigger_tab) { + pr_err(EINJ_PFX "Failed to map trigger table!\n"); + goto out_rel_entry; + } + trigger_entry = (struct acpi_whea_header *) + ((char *)trigger_tab + sizeof(struct acpi_einj_trigger)); + apei_resources_init(&trigger_resources); + apei_exec_ctx_init(&trigger_ctx, einj_ins_type, + ARRAY_SIZE(einj_ins_type), + trigger_entry, trigger_tab->entry_count); + rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources); + if (rc) + goto out_fini; + rc = apei_resources_sub(&trigger_resources, &einj_resources); + if (rc) + goto out_fini; + rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger"); + if (rc) + goto out_fini; + rc = apei_exec_pre_map_gars(&trigger_ctx); + if (rc) + goto out_release; + + rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR); + + apei_exec_post_unmap_gars(&trigger_ctx); +out_release: + apei_resources_release(&trigger_resources); +out_fini: + apei_resources_fini(&trigger_resources); +out_rel_entry: + release_mem_region(trigger_paddr + sizeof(*trigger_tab), + table_size - sizeof(*trigger_tab)); +out_rel_header: + release_mem_region(trigger_paddr, sizeof(*trigger_tab)); +out: + if (trigger_tab) + iounmap(trigger_tab); + + return rc; +} + +static int __einj_error_inject(u32 type, u64 param1, u64 param2) +{ + struct apei_exec_context ctx; + u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT; + int rc; + + einj_exec_ctx_init(&ctx); + + rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, type); + rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE); + if (rc) + return rc; + if (einj_param) { + writeq(param1, &einj_param->param1); + writeq(param2, &einj_param->param2); + } + rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!(val & EINJ_OP_BUSY)) + break; + if (einj_timedout(&timeout)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (val != EINJ_STATUS_SUCCESS) + return -EBUSY; + + rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE); + if (rc) + return rc; + trigger_paddr = apei_exec_ctx_get_output(&ctx); + rc = __einj_error_trigger(trigger_paddr); + if (rc) + return rc; + rc = apei_exec_run(&ctx, 
ACPI_EINJ_END_OPERATION); + + return rc; +} + +/* Inject the specified hardware error */ +static int einj_error_inject(u32 type, u64 param1, u64 param2) +{ + int rc; + + mutex_lock(&einj_mutex); + rc = __einj_error_inject(type, param1, param2); + mutex_unlock(&einj_mutex); + + return rc; +} + +static u32 error_type; +static u64 error_param1; +static u64 error_param2; +static struct dentry *einj_debug_dir; + +static int available_error_type_show(struct seq_file *m, void *v) +{ + int rc; + u32 available_error_type = 0; + + rc = einj_get_available_error_type(&available_error_type); + if (rc) + return rc; + if (available_error_type & 0x0001) + seq_printf(m, "0x00000001\tProcessor Correctable\n"); + if (available_error_type & 0x0002) + seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n"); + if (available_error_type & 0x0004) + seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n"); + if (available_error_type & 0x0008) + seq_printf(m, "0x00000008\tMemory Correctable\n"); + if (available_error_type & 0x0010) + seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n"); + if (available_error_type & 0x0020) + seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n"); + if (available_error_type & 0x0040) + seq_printf(m, "0x00000040\tPCI Express Correctable\n"); + if (available_error_type & 0x0080) + seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n"); + if (available_error_type & 0x0100) + seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n"); + if (available_error_type & 0x0200) + seq_printf(m, "0x00000200\tPlatform Correctable\n"); + if (available_error_type & 0x0400) + seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n"); + if (available_error_type & 0x0800) + seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n"); + + return 0; +} + +static int available_error_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, available_error_type_show, NULL); +} + +static const struct file_operations available_error_type_fops = { + .open = available_error_type_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int error_type_get(void *data, u64 *val) +{ + *val = error_type; + + return 0; +} + +static int error_type_set(void *data, u64 val) +{ + int rc; + u32 available_error_type = 0; + + /* Only one error type can be specified */ + if (val & (val - 1)) + return -EINVAL; + rc = einj_get_available_error_type(&available_error_type); + if (rc) + return rc; + if (!(val & available_error_type)) + return -EINVAL; + error_type = val; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, + error_type_set, "0x%llx\n"); + +static int error_inject_set(void *data, u64 val) +{ + if (!error_type) + return -EINVAL; + + return einj_error_inject(error_type, error_param1, error_param2); +} + +DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, + error_inject_set, "%llu\n"); + +static int einj_check_table(struct acpi_table_einj *einj_tab) +{ + if (einj_tab->header_length != sizeof(struct acpi_table_einj)) + return -EINVAL; + if (einj_tab->header.length < sizeof(struct acpi_table_einj)) + return -EINVAL; + if (einj_tab->entries != + (einj_tab->header.length - sizeof(struct acpi_table_einj)) / + sizeof(struct acpi_einj_entry)) + return -EINVAL; + + return 0; +} + +static int __init einj_init(void) +{ + int rc; + u64 param_paddr; + acpi_status status; + struct dentry *fentry; + struct apei_exec_context ctx; + + if (acpi_disabled) + return -ENODEV; + + status = 
acpi_get_table(ACPI_SIG_EINJ, 0, + (struct acpi_table_header **)&einj_tab); + if (status == AE_NOT_FOUND) { + pr_info(EINJ_PFX "Table is not found!\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(EINJ_PFX "Failed to get table, %s\n", msg); + return -EINVAL; + } + + rc = einj_check_table(einj_tab); + if (rc) { + pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n"); + return -EINVAL; + } + + rc = -ENOMEM; + einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); + if (!einj_debug_dir) + goto err_cleanup; + fentry = debugfs_create_file("available_error_type", S_IRUSR, + einj_debug_dir, NULL, + &available_error_type_fops); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, + einj_debug_dir, NULL, &error_type_fops); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param1); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param2); + if (!fentry) + goto err_cleanup; + fentry = debugfs_create_file("error_inject", S_IWUSR, + einj_debug_dir, NULL, &error_inject_fops); + if (!fentry) + goto err_cleanup; + + apei_resources_init(&einj_resources); + einj_exec_ctx_init(&ctx); + rc = apei_exec_collect_resources(&ctx, &einj_resources); + if (rc) + goto err_fini; + rc = apei_resources_request(&einj_resources, "APEI EINJ"); + if (rc) + goto err_fini; + rc = apei_exec_pre_map_gars(&ctx); + if (rc) + goto err_release; + param_paddr = einj_get_parameter_address(); + if (param_paddr) { + einj_param = ioremap(param_paddr, sizeof(*einj_param)); + rc = -ENOMEM; + if (!einj_param) + goto err_unmap; + } + + pr_info(EINJ_PFX "Error INJection is initialized.\n"); + + return 0; + +err_unmap: + apei_exec_post_unmap_gars(&ctx); +err_release: + apei_resources_release(&einj_resources); +err_fini: + apei_resources_fini(&einj_resources); +err_cleanup: + debugfs_remove_recursive(einj_debug_dir); + + return rc; +} + +static void __exit einj_exit(void) +{ + struct apei_exec_context ctx; + + if (einj_param) + iounmap(einj_param); + einj_exec_ctx_init(&ctx); + apei_exec_post_unmap_gars(&ctx); + apei_resources_release(&einj_resources); + apei_resources_fini(&einj_resources); + debugfs_remove_recursive(einj_debug_dir); +} + +module_init(einj_init); +module_exit(einj_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Error INJection support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c new file mode 100644 index 000000000000..2ebc39115507 --- /dev/null +++ b/drivers/acpi/apei/erst.c @@ -0,0 +1,855 @@ +/* + * APEI Error Record Serialization Table support + * + * ERST is a way provided by APEI to save and retrieve hardware error + * infomation to and from a persistent store. + * + * For more information about ERST, please refer to ACPI Specification + * version 4.0, section 17.4. + * + * Copyright 2010 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apei-internal.h" + +#define ERST_PFX "ERST: " + +/* ERST command status */ +#define ERST_STATUS_SUCCESS 0x0 +#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1 +#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2 +#define ERST_STATUS_FAILED 0x3 +#define ERST_STATUS_RECORD_STORE_EMPTY 0x4 +#define ERST_STATUS_RECORD_NOT_FOUND 0x5 + +#define ERST_TAB_ENTRY(tab) \ + ((struct acpi_whea_header *)((char *)(tab) + \ + sizeof(struct acpi_table_erst))) + +#define SPIN_UNIT 100 /* 100ns */ +/* Firmware should respond within 1 miliseconds */ +#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC) +#define FIRMWARE_MAX_STALL 50 /* 50us */ + +int erst_disable; +EXPORT_SYMBOL_GPL(erst_disable); + +static struct acpi_table_erst *erst_tab; + +/* ERST Error Log Address Range atrributes */ +#define ERST_RANGE_RESERVED 0x0001 +#define ERST_RANGE_NVRAM 0x0002 +#define ERST_RANGE_SLOW 0x0004 + +/* + * ERST Error Log Address Range, used as buffer for reading/writing + * error records. + */ +static struct erst_erange { + u64 base; + u64 size; + void __iomem *vaddr; + u32 attr; +} erst_erange; + +/* + * Prevent ERST interpreter to run simultaneously, because the + * corresponding firmware implementation may not work properly when + * invoked simultaneously. + * + * It is used to provide exclusive accessing for ERST Error Log + * Address Range too. + */ +static DEFINE_SPINLOCK(erst_lock); + +static inline int erst_errno(int command_status) +{ + switch (command_status) { + case ERST_STATUS_SUCCESS: + return 0; + case ERST_STATUS_HARDWARE_NOT_AVAILABLE: + return -ENODEV; + case ERST_STATUS_NOT_ENOUGH_SPACE: + return -ENOSPC; + case ERST_STATUS_RECORD_STORE_EMPTY: + case ERST_STATUS_RECORD_NOT_FOUND: + return -ENOENT; + default: + return -EINVAL; + } +} + +static int erst_timedout(u64 *t, u64 spin_unit) +{ + if ((s64)*t < spin_unit) { + pr_warning(FW_WARN ERST_PFX + "Firmware does not respond in time\n"); + return 1; + } + *t -= spin_unit; + ndelay(spin_unit); + touch_nmi_watchdog(); + return 0; +} + +static int erst_exec_load_var1(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->var1); +} + +static int erst_exec_load_var2(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->var2); +} + +static int erst_exec_store_var1(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_write_register(entry, ctx->var1); +} + +static int erst_exec_add(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->var1 += ctx->var2; + return 0; +} + +static int erst_exec_subtract(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->var1 -= ctx->var2; + return 0; +} + +static int erst_exec_add_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + val += ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; +} + +static int erst_exec_subtract_value(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; 
+ val -= ctx->value; + rc = __apei_exec_write_register(entry, val); + return rc; +} + +static int erst_exec_stall(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + u64 stall_time; + + if (ctx->value > FIRMWARE_MAX_STALL) { + if (!in_nmi()) + pr_warning(FW_WARN ERST_PFX + "Too long stall time for stall instruction: %llx.\n", + ctx->value); + stall_time = FIRMWARE_MAX_STALL; + } else + stall_time = ctx->value; + udelay(stall_time); + return 0; +} + +static int erst_exec_stall_while_true(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + u64 timeout = FIRMWARE_TIMEOUT; + u64 stall_time; + + if (ctx->var1 > FIRMWARE_MAX_STALL) { + if (!in_nmi()) + pr_warning(FW_WARN ERST_PFX + "Too long stall time for stall while true instruction: %llx.\n", + ctx->var1); + stall_time = FIRMWARE_MAX_STALL; + } else + stall_time = ctx->var1; + + for (;;) { + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + if (val != ctx->value) + break; + if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC)) + return -EIO; + } + return 0; +} + +static int erst_exec_skip_next_instruction_if_true( + struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 val; + + rc = __apei_exec_read_register(entry, &val); + if (rc) + return rc; + if (val == ctx->value) { + ctx->ip += 2; + return APEI_EXEC_SET_IP; + } + + return 0; +} + +static int erst_exec_goto(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + ctx->ip = ctx->value; + return APEI_EXEC_SET_IP; +} + +static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->src_base); +} + +static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + return __apei_exec_read_register(entry, &ctx->dst_base); +} + +static int erst_exec_move_data(struct apei_exec_context *ctx, + struct acpi_whea_header *entry) +{ + int rc; + u64 offset; + + rc = __apei_exec_read_register(entry, &offset); + if (rc) + return rc; + memmove((void *)ctx->dst_base + offset, + (void *)ctx->src_base + offset, + ctx->var2); + + return 0; +} + +static struct apei_exec_ins_type erst_ins_type[] = { + [ACPI_ERST_READ_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register, + }, + [ACPI_ERST_READ_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_read_register_value, + }, + [ACPI_ERST_WRITE_REGISTER] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register, + }, + [ACPI_ERST_WRITE_REGISTER_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = apei_exec_write_register_value, + }, + [ACPI_ERST_NOOP] = { + .flags = 0, + .run = apei_exec_noop, + }, + [ACPI_ERST_LOAD_VAR1] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var1, + }, + [ACPI_ERST_LOAD_VAR2] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_load_var2, + }, + [ACPI_ERST_STORE_VAR1] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_store_var1, + }, + [ACPI_ERST_ADD] = { + .flags = 0, + .run = erst_exec_add, + }, + [ACPI_ERST_SUBTRACT] = { + .flags = 0, + .run = erst_exec_subtract, + }, + [ACPI_ERST_ADD_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_add_value, + }, + [ACPI_ERST_SUBTRACT_VALUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_subtract_value, + }, + [ACPI_ERST_STALL] = { + .flags = 0, + .run = 
erst_exec_stall, + }, + [ACPI_ERST_STALL_WHILE_TRUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_stall_while_true, + }, + [ACPI_ERST_SKIP_NEXT_IF_TRUE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_skip_next_instruction_if_true, + }, + [ACPI_ERST_GOTO] = { + .flags = 0, + .run = erst_exec_goto, + }, + [ACPI_ERST_SET_SRC_ADDRESS_BASE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_src_address_base, + }, + [ACPI_ERST_SET_DST_ADDRESS_BASE] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_set_dst_address_base, + }, + [ACPI_ERST_MOVE_DATA] = { + .flags = APEI_EXEC_INS_ACCESS_REGISTER, + .run = erst_exec_move_data, + }, +}; + +static inline void erst_exec_ctx_init(struct apei_exec_context *ctx) +{ + apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), + ERST_TAB_ENTRY(erst_tab), erst_tab->entries); +} + +static int erst_get_erange(struct erst_erange *range) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); + if (rc) + return rc; + range->base = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); + if (rc) + return rc; + range->size = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); + if (rc) + return rc; + range->attr = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +static ssize_t __erst_get_record_count(void) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); + if (rc) + return rc; + return apei_exec_ctx_get_output(&ctx); +} + +ssize_t erst_get_record_count(void) +{ + ssize_t count; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + count = __erst_get_record_count(); + spin_unlock_irqrestore(&erst_lock, flags); + + return count; +} +EXPORT_SYMBOL_GPL(erst_get_record_count); + +static int __erst_get_next_record_id(u64 *record_id) +{ + struct apei_exec_context ctx; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); + if (rc) + return rc; + *record_id = apei_exec_ctx_get_output(&ctx); + + return 0; +} + +/* + * Get the record ID of an existing error record on the persistent + * storage. If there is no error record on the persistent storage, the + * returned record_id is APEI_ERST_INVALID_RECORD_ID. 
+ */ +int erst_get_next_record_id(u64 *record_id) +{ + int rc; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(record_id); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(erst_get_next_record_id); + +static int __erst_write_to_storage(u64 offset) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +static int __erst_read_from_storage(u64 record_id, u64 offset) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, offset); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + }; + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +static int __erst_clear_from_storage(u64 record_id) +{ + struct apei_exec_context ctx; + u64 timeout = FIRMWARE_TIMEOUT; + u64 val; + int rc; + + erst_exec_ctx_init(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); + if (rc) + return rc; + apei_exec_ctx_set_input(&ctx, record_id); + rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); + if (rc) + return rc; + rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); + if (rc) + return rc; + for (;;) { + rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + if (!val) + break; + if (erst_timedout(&timeout, SPIN_UNIT)) + return -EIO; + } + rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); + if (rc) + return rc; + val = apei_exec_ctx_get_output(&ctx); + rc = apei_exec_run(&ctx, ACPI_ERST_END); + if (rc) + return rc; + + return erst_errno(val); +} + +/* NVRAM ERST Error Log Address Range is not supported yet */ +static void pr_unimpl_nvram(void) +{ + if (printk_ratelimit()) + pr_warning(ERST_PFX + "NVRAM ERST Log Address Range is not implemented yet\n"); +} + +static int __erst_write_to_nvram(const struct cper_record_header *record) +{ + /* do not print message, because printk is not safe for NMI */ + return -ENOSYS; +} + +static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset) +{ + 
pr_unimpl_nvram(); + return -ENOSYS; +} + +static int __erst_clear_from_nvram(u64 record_id) +{ + pr_unimpl_nvram(); + return -ENOSYS; +} + +int erst_write(const struct cper_record_header *record) +{ + int rc; + unsigned long flags; + struct cper_record_header *rcd_erange; + + if (erst_disable) + return -ENODEV; + + if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE)) + return -EINVAL; + + if (erst_erange.attr & ERST_RANGE_NVRAM) { + if (!spin_trylock_irqsave(&erst_lock, flags)) + return -EBUSY; + rc = __erst_write_to_nvram(record); + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } + + if (record->record_length > erst_erange.size) + return -EINVAL; + + if (!spin_trylock_irqsave(&erst_lock, flags)) + return -EBUSY; + memcpy(erst_erange.vaddr, record, record->record_length); + rcd_erange = erst_erange.vaddr; + /* signature for serialization system */ + memcpy(&rcd_erange->persistence_information, "ER", 2); + + rc = __erst_write_to_storage(0); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(erst_write); + +static int __erst_read_to_erange(u64 record_id, u64 *offset) +{ + int rc; + + if (erst_erange.attr & ERST_RANGE_NVRAM) + return __erst_read_to_erange_from_nvram( + record_id, offset); + + rc = __erst_read_from_storage(record_id, 0); + if (rc) + return rc; + *offset = 0; + + return 0; +} + +static ssize_t __erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen) +{ + int rc; + u64 offset, len = 0; + struct cper_record_header *rcd_tmp; + + rc = __erst_read_to_erange(record_id, &offset); + if (rc) + return rc; + rcd_tmp = erst_erange.vaddr + offset; + len = rcd_tmp->record_length; + if (len <= buflen) + memcpy(record, rcd_tmp, len); + + return len; +} + +/* + * If return value > buflen, the buffer size is not big enough, + * else if return value < 0, something goes wrong, + * else everything is OK, and return value is record length + */ +ssize_t erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen) +{ + ssize_t len; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); + return len; +} +EXPORT_SYMBOL_GPL(erst_read); + +/* + * If return value > buflen, the buffer size is not big enough, + * else if return value = 0, there is no more record to read, + * else if return value < 0, something goes wrong, + * else everything is OK, and return value is record length + */ +ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) +{ + int rc; + ssize_t len; + unsigned long flags; + u64 record_id; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + rc = __erst_get_next_record_id(&record_id); + if (rc) { + spin_unlock_irqrestore(&erst_lock, flags); + return rc; + } + /* no more record */ + if (record_id == APEI_ERST_INVALID_RECORD_ID) { + spin_unlock_irqrestore(&erst_lock, flags); + return 0; + } + + len = __erst_read(record_id, record, buflen); + spin_unlock_irqrestore(&erst_lock, flags); + + return len; +} +EXPORT_SYMBOL_GPL(erst_read_next); + +int erst_clear(u64 record_id) +{ + int rc; + unsigned long flags; + + if (erst_disable) + return -ENODEV; + + spin_lock_irqsave(&erst_lock, flags); + if (erst_erange.attr & ERST_RANGE_NVRAM) + rc = __erst_clear_from_nvram(record_id); + else + rc = __erst_clear_from_storage(record_id); + spin_unlock_irqrestore(&erst_lock, flags); + + return rc; +} 
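/*
 * Editorial illustration, not part of the patch: a minimal sketch of a
 * hypothetical in-kernel consumer draining the persistent store with the
 * erst_read_next() interface added above.  The function name, the 4 KiB
 * starting buffer and the pr_info() reporting are assumptions made for
 * the example; <linux/slab.h> and the ERST declarations from this patch
 * series are assumed to be in scope.
 */
static int example_erst_drain(void)
{
	struct cper_record_header *rcd;
	size_t buflen = 4096;
	ssize_t len;

	rcd = kmalloc(buflen, GFP_KERNEL);
	if (!rcd)
		return -ENOMEM;

	for (;;) {
		len = erst_read_next(rcd, buflen);
		if (len == 0)		/* no more records */
			break;
		if (len < 0) {		/* firmware or I/O error */
			kfree(rcd);
			return len;
		}
		if ((size_t)len > buflen) {
			/*
			 * Record is larger than the buffer.  A real consumer
			 * would use erst_get_next_record_id() plus erst_read()
			 * so it can retry the same record with a larger
			 * buffer; this sketch simply stops.
			 */
			break;
		}
		pr_info("ERST: read record of %zd bytes\n", len);
	}

	kfree(rcd);
	return 0;
}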
+EXPORT_SYMBOL_GPL(erst_clear); + +static int __init setup_erst_disable(char *str) +{ + erst_disable = 1; + return 0; +} + +__setup("erst_disable", setup_erst_disable); + +static int erst_check_table(struct acpi_table_erst *erst_tab) +{ + if (erst_tab->header_length != sizeof(struct acpi_table_erst)) + return -EINVAL; + if (erst_tab->header.length < sizeof(struct acpi_table_erst)) + return -EINVAL; + if (erst_tab->entries != + (erst_tab->header.length - sizeof(struct acpi_table_erst)) / + sizeof(struct acpi_erst_entry)) + return -EINVAL; + + return 0; +} + +static int __init erst_init(void) +{ + int rc = 0; + acpi_status status; + struct apei_exec_context ctx; + struct apei_resources erst_resources; + struct resource *r; + + if (acpi_disabled) + goto err; + + if (erst_disable) { + pr_info(ERST_PFX + "Error Record Serialization Table (ERST) support is disabled.\n"); + goto err; + } + + status = acpi_get_table(ACPI_SIG_ERST, 0, + (struct acpi_table_header **)&erst_tab); + if (status == AE_NOT_FOUND) { + pr_err(ERST_PFX "Table is not found!\n"); + goto err; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(ERST_PFX "Failed to get table, %s\n", msg); + rc = -EINVAL; + goto err; + } + + rc = erst_check_table(erst_tab); + if (rc) { + pr_err(FW_BUG ERST_PFX "ERST table is invalid\n"); + goto err; + } + + apei_resources_init(&erst_resources); + erst_exec_ctx_init(&ctx); + rc = apei_exec_collect_resources(&ctx, &erst_resources); + if (rc) + goto err_fini; + rc = apei_resources_request(&erst_resources, "APEI ERST"); + if (rc) + goto err_fini; + rc = apei_exec_pre_map_gars(&ctx); + if (rc) + goto err_release; + rc = erst_get_erange(&erst_erange); + if (rc) { + if (rc == -ENODEV) + pr_info(ERST_PFX + "The corresponding hardware device or firmware implementation " + "is not available.\n"); + else + pr_err(ERST_PFX + "Failed to get Error Log Address Range.\n"); + goto err_unmap_reg; + } + + r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST"); + if (!r) { + pr_err(ERST_PFX + "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n", + (unsigned long long)erst_erange.base, + (unsigned long long)erst_erange.base + erst_erange.size); + rc = -EIO; + goto err_unmap_reg; + } + rc = -ENOMEM; + erst_erange.vaddr = ioremap_cache(erst_erange.base, + erst_erange.size); + if (!erst_erange.vaddr) + goto err_release_erange; + + pr_info(ERST_PFX + "Error Record Serialization Table (ERST) support is initialized.\n"); + + return 0; + +err_release_erange: + release_mem_region(erst_erange.base, erst_erange.size); +err_unmap_reg: + apei_exec_post_unmap_gars(&ctx); +err_release: + apei_resources_release(&erst_resources); +err_fini: + apei_resources_fini(&erst_resources); +err: + erst_disable = 1; + return rc; +} + +device_initcall(erst_init); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c new file mode 100644 index 000000000000..fd0cc016a099 --- /dev/null +++ b/drivers/acpi/apei/ghes.c @@ -0,0 +1,427 @@ +/* + * APEI Generic Hardware Error Source support + * + * Generic Hardware Error Source provides a way to report platform + * hardware errors (such as that from chipset). It works in so called + * "Firmware First" mode, that is, hardware errors are reported to + * firmware firstly, then reported to Linux by firmware. This way, + * some non-standard hardware error registers or non-standard hardware + * link can be checked by firmware to produce more hardware error + * information for Linux. 
+ * + * For more information about Generic Hardware Error Source, please + * refer to ACPI Specification version 4.0, section 17.3.2.6 + * + * Now, only SCI notification type and memory errors are + * supported. More notification type and hardware error type will be + * added later. + * + * Copyright 2010 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apei-internal.h" + +#define GHES_PFX "GHES: " + +#define GHES_ESTATUS_MAX_SIZE 65536 + +/* + * One struct ghes is created for each generic hardware error + * source. + * + * It provides the context for APEI hardware error timer/IRQ/SCI/NMI + * handler. Handler for one generic hardware error source is only + * triggered after the previous one is done. So handler can uses + * struct ghes without locking. + * + * estatus: memory buffer for error status block, allocated during + * HEST parsing. + */ +#define GHES_TO_CLEAR 0x0001 + +struct ghes { + struct acpi_hest_generic *generic; + struct acpi_hest_generic_status *estatus; + struct list_head list; + u64 buffer_paddr; + unsigned long flags; +}; + +/* + * Error source lists, one list for each notification method. The + * members in lists are struct ghes. + * + * The list members are only added in HEST parsing and deleted during + * module_exit, that is, single-threaded. So no lock is needed for + * that. + * + * But the mutual exclusion is needed between members adding/deleting + * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is + * used for that. 
+ */ +static LIST_HEAD(ghes_sci); + +static struct ghes *ghes_new(struct acpi_hest_generic *generic) +{ + struct ghes *ghes; + unsigned int error_block_length; + int rc; + + ghes = kzalloc(sizeof(*ghes), GFP_KERNEL); + if (!ghes) + return ERR_PTR(-ENOMEM); + ghes->generic = generic; + INIT_LIST_HEAD(&ghes->list); + rc = acpi_pre_map_gar(&generic->error_status_address); + if (rc) + goto err_free; + error_block_length = generic->error_block_length; + if (error_block_length > GHES_ESTATUS_MAX_SIZE) { + pr_warning(FW_WARN GHES_PFX + "Error status block length is too long: %u for " + "generic hardware error source: %d.\n", + error_block_length, generic->header.source_id); + error_block_length = GHES_ESTATUS_MAX_SIZE; + } + ghes->estatus = kmalloc(error_block_length, GFP_KERNEL); + if (!ghes->estatus) { + rc = -ENOMEM; + goto err_unmap; + } + + return ghes; + +err_unmap: + acpi_post_unmap_gar(&generic->error_status_address); +err_free: + kfree(ghes); + return ERR_PTR(rc); +} + +static void ghes_fini(struct ghes *ghes) +{ + kfree(ghes->estatus); + acpi_post_unmap_gar(&ghes->generic->error_status_address); +} + +enum { + GHES_SER_NO = 0x0, + GHES_SER_CORRECTED = 0x1, + GHES_SER_RECOVERABLE = 0x2, + GHES_SER_PANIC = 0x3, +}; + +static inline int ghes_severity(int severity) +{ + switch (severity) { + case CPER_SER_INFORMATIONAL: + return GHES_SER_NO; + case CPER_SER_CORRECTED: + return GHES_SER_CORRECTED; + case CPER_SER_RECOVERABLE: + return GHES_SER_RECOVERABLE; + case CPER_SER_FATAL: + return GHES_SER_PANIC; + default: + /* Unkown, go panic */ + return GHES_SER_PANIC; + } +} + +/* SCI handler run in work queue, so ioremap can be used here */ +static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, + int from_phys) +{ + void *vaddr; + + vaddr = ioremap_cache(paddr, len); + if (!vaddr) + return -ENOMEM; + if (from_phys) + memcpy(buffer, vaddr, len); + else + memcpy(vaddr, buffer, len); + iounmap(vaddr); + + return 0; +} + +static int ghes_read_estatus(struct ghes *ghes, int silent) +{ + struct acpi_hest_generic *g = ghes->generic; + u64 buf_paddr; + u32 len; + int rc; + + rc = acpi_atomic_read(&buf_paddr, &g->error_status_address); + if (rc) { + if (!silent && printk_ratelimit()) + pr_warning(FW_WARN GHES_PFX +"Failed to read error status block address for hardware error source: %d.\n", + g->header.source_id); + return -EIO; + } + if (!buf_paddr) + return -ENOENT; + + rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, + sizeof(*ghes->estatus), 1); + if (rc) + return rc; + if (!ghes->estatus->block_status) + return -ENOENT; + + ghes->buffer_paddr = buf_paddr; + ghes->flags |= GHES_TO_CLEAR; + + rc = -EIO; + len = apei_estatus_len(ghes->estatus); + if (len < sizeof(*ghes->estatus)) + goto err_read_block; + if (len > ghes->generic->error_block_length) + goto err_read_block; + if (apei_estatus_check_header(ghes->estatus)) + goto err_read_block; + rc = ghes_copy_tofrom_phys(ghes->estatus + 1, + buf_paddr + sizeof(*ghes->estatus), + len - sizeof(*ghes->estatus), 1); + if (rc) + return rc; + if (apei_estatus_check(ghes->estatus)) + goto err_read_block; + rc = 0; + +err_read_block: + if (rc && !silent) + pr_warning(FW_WARN GHES_PFX + "Failed to read error status block!\n"); + return rc; +} + +static void ghes_clear_estatus(struct ghes *ghes) +{ + ghes->estatus->block_status = 0; + if (!(ghes->flags & GHES_TO_CLEAR)) + return; + ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr, + sizeof(ghes->estatus->block_status), 0); + ghes->flags &= ~GHES_TO_CLEAR; +} + +static void 
ghes_do_proc(struct ghes *ghes) +{ + int ser, processed = 0; + struct acpi_hest_generic_data *gdata; + + ser = ghes_severity(ghes->estatus->error_severity); + apei_estatus_for_each_section(ghes->estatus, gdata) { +#ifdef CONFIG_X86_MCE + if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, + CPER_SEC_PLATFORM_MEM)) { + apei_mce_report_mem_error( + ser == GHES_SER_CORRECTED, + (struct cper_sec_mem_err *)(gdata+1)); + processed = 1; + } +#endif + } + + if (!processed && printk_ratelimit()) + pr_warning(GHES_PFX + "Unknown error record from generic hardware error source: %d\n", + ghes->generic->header.source_id); +} + +static int ghes_proc(struct ghes *ghes) +{ + int rc; + + rc = ghes_read_estatus(ghes, 0); + if (rc) + goto out; + ghes_do_proc(ghes); + +out: + ghes_clear_estatus(ghes); + return 0; +} + +static int ghes_notify_sci(struct notifier_block *this, + unsigned long event, void *data) +{ + struct ghes *ghes; + int ret = NOTIFY_DONE; + + rcu_read_lock(); + list_for_each_entry_rcu(ghes, &ghes_sci, list) { + if (!ghes_proc(ghes)) + ret = NOTIFY_OK; + } + rcu_read_unlock(); + + return ret; +} + +static struct notifier_block ghes_notifier_sci = { + .notifier_call = ghes_notify_sci, +}; + +static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + struct acpi_hest_generic *generic; + struct ghes *ghes = NULL; + int rc = 0; + + if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) + return 0; + + generic = (struct acpi_hest_generic *)hest_hdr; + if (!generic->enabled) + return 0; + + if (generic->error_block_length < + sizeof(struct acpi_hest_generic_status)) { + pr_warning(FW_BUG GHES_PFX +"Invalid error block length: %u for generic hardware error source: %d\n", + generic->error_block_length, + generic->header.source_id); + goto err; + } + if (generic->records_to_preallocate == 0) { + pr_warning(FW_BUG GHES_PFX +"Invalid records to preallocate: %u for generic hardware error source: %d\n", + generic->records_to_preallocate, + generic->header.source_id); + goto err; + } + ghes = ghes_new(generic); + if (IS_ERR(ghes)) { + rc = PTR_ERR(ghes); + ghes = NULL; + goto err; + } + switch (generic->notify.type) { + case ACPI_HEST_NOTIFY_POLLED: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via POLL is not supported!\n", + generic->header.source_id); + break; + case ACPI_HEST_NOTIFY_EXTERNAL: + case ACPI_HEST_NOTIFY_LOCAL: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via IRQ is not supported!\n", + generic->header.source_id); + break; + case ACPI_HEST_NOTIFY_SCI: + if (list_empty(&ghes_sci)) + register_acpi_hed_notifier(&ghes_notifier_sci); + list_add_rcu(&ghes->list, &ghes_sci); + break; + case ACPI_HEST_NOTIFY_NMI: + pr_warning(GHES_PFX +"Generic hardware error source: %d notified via NMI is not supported!\n", + generic->header.source_id); + break; + default: + pr_warning(FW_WARN GHES_PFX + "Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); + break; + } + + return 0; +err: + if (ghes) + ghes_fini(ghes); + return rc; +} + +static void ghes_cleanup(void) +{ + struct ghes *ghes, *nghes; + + if (!list_empty(&ghes_sci)) + unregister_acpi_hed_notifier(&ghes_notifier_sci); + + synchronize_rcu(); + + list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) { + list_del(&ghes->list); + ghes_fini(ghes); + kfree(ghes); + } +} + +static int __init ghes_init(void) +{ + int rc; + + if (acpi_disabled) + return -ENODEV; + + if (hest_disable) { + pr_info(GHES_PFX "HEST is not enabled!\n"); 
+ return -EINVAL; + } + + rc = apei_hest_parse(hest_ghes_parse, NULL); + if (rc) { + pr_err(GHES_PFX + "Error during parsing HEST generic hardware error sources.\n"); + goto err_cleanup; + } + + if (list_empty(&ghes_sci)) { + pr_info(GHES_PFX + "No functional generic hardware error sources.\n"); + rc = -ENODEV; + goto err_cleanup; + } + + pr_info(GHES_PFX + "Generic Hardware Error Source support is initialized.\n"); + + return 0; +err_cleanup: + ghes_cleanup(); + return rc; +} + +static void __exit ghes_exit(void) +{ + ghes_cleanup(); +} + +module_init(ghes_init); +module_exit(ghes_exit); + +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("APEI Generic Hardware Error Source support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c new file mode 100644 index 000000000000..e7f40d362cb3 --- /dev/null +++ b/drivers/acpi/apei/hest.c @@ -0,0 +1,173 @@ +/* + * APEI Hardware Error Souce Table support + * + * HEST describes error sources in detail; communicates operational + * parameters (i.e. severity levels, masking bits, and threshold + * values) to Linux as necessary. It also allows the BIOS to report + * non-standard error sources to Linux (for example, chipset-specific + * error registers). + * + * For more information about HEST, please refer to ACPI Specification + * version 4.0, section 17.3.2. + * + * Copyright 2009 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "apei-internal.h" + +#define HEST_PFX "HEST: " + +int hest_disable; +EXPORT_SYMBOL_GPL(hest_disable); + +/* HEST table parsing */ + +static struct acpi_table_hest *hest_tab; + +static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + return 0; +} + +static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { + [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ + [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, + [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), + [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root), + [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer), + [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), + [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), +}; + +static int hest_esrc_len(struct acpi_hest_header *hest_hdr) +{ + u16 hest_type = hest_hdr->type; + int len; + + if (hest_type >= ACPI_HEST_TYPE_RESERVED) + return 0; + + len = hest_esrc_len_tab[hest_type]; + + if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) { + struct acpi_hest_ia_corrected *cmc; + cmc = (struct acpi_hest_ia_corrected *)hest_hdr; + len = sizeof(*cmc) + cmc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); + } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) { + struct acpi_hest_ia_machine_check *mc; + mc = (struct acpi_hest_ia_machine_check *)hest_hdr; + len = sizeof(*mc) + mc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); + } + BUG_ON(len == -1); + + return len; +}; + +int apei_hest_parse(apei_hest_func_t func, void *data) +{ + struct acpi_hest_header *hest_hdr; + int i, rc, len; + + if (hest_disable) + return -EINVAL; + + hest_hdr = (struct acpi_hest_header *)(hest_tab + 1); + for (i = 0; i < hest_tab->error_source_count; i++) { + len = hest_esrc_len(hest_hdr); + if (!len) { + pr_warning(FW_WARN HEST_PFX + "Unknown or unused hardware error source " + "type: %d for hardware error source: %d.\n", + hest_hdr->type, hest_hdr->source_id); + return -EINVAL; + } + if ((void *)hest_hdr + len > + (void *)hest_tab + hest_tab->header.length) { + pr_warning(FW_BUG HEST_PFX + "Table contents overflow for hardware error source: %d.\n", + hest_hdr->source_id); + return -EINVAL; + } + + rc = func(hest_hdr, data); + if (rc) + return rc; + + hest_hdr = (void *)hest_hdr + len; + } + + return 0; +} +EXPORT_SYMBOL_GPL(apei_hest_parse); + +static int __init setup_hest_disable(char *str) +{ + hest_disable = 1; + return 0; +} + +__setup("hest_disable", setup_hest_disable); + +static int __init hest_init(void) +{ + acpi_status status; + int rc = -ENODEV; + + if (acpi_disabled) + goto err; + + if (hest_disable) { + pr_info(HEST_PFX "HEST tabling parsing is disabled.\n"); + goto err; + } + + status = acpi_get_table(ACPI_SIG_HEST, 0, + (struct acpi_table_header **)&hest_tab); + if (status == AE_NOT_FOUND) { + pr_info(HEST_PFX "Table is not found!\n"); + goto err; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err(HEST_PFX "Failed to get table, %s\n", msg); + rc = -EINVAL; + goto err; + } + + rc = apei_hest_parse(hest_void_parse, NULL); + if (rc) + goto err; + + pr_info(HEST_PFX "HEST table parsing is initialized.\n"); + + return 0; +err: + 
hest_disable = 1; + return rc; +} + +subsys_initcall(hest_init); diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c new file mode 100644 index 000000000000..814b19249616 --- /dev/null +++ b/drivers/acpi/atomicio.c @@ -0,0 +1,360 @@ +/* + * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then + * accessing in atomic context. + * + * This is used for NMI handler to access IO memory area, because + * ioremap/iounmap can not be used in NMI handler. The IO memory area + * is pre-mapped in process context and accessed in NMI handler. + * + * Copyright (C) 2009-2010, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ACPI_PFX "ACPI: " + +static LIST_HEAD(acpi_iomaps); +/* + * Used for mutual exclusion between writers of acpi_iomaps list, for + * synchronization between readers and writer, RCU is used. + */ +static DEFINE_SPINLOCK(acpi_iomaps_lock); + +struct acpi_iomap { + struct list_head list; + void __iomem *vaddr; + unsigned long size; + phys_addr_t paddr; + struct kref ref; +}; + +/* acpi_iomaps_lock or RCU read lock must be held before calling */ +static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + list_for_each_entry_rcu(map, &acpi_iomaps, list) { + if (map->paddr + map->size >= paddr + size && + map->paddr <= paddr) + return map; + } + return NULL; +} + +/* + * Atomic "ioremap" used by NMI handler, if the specified IO memory + * area is not pre-mapped, NULL will be returned. + * + * acpi_iomaps_lock or RCU read lock must be held before calling + */ +static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + map = __acpi_find_iomap(paddr, size); + if (map) + return map->vaddr + (paddr - map->paddr); + else + return NULL; +} + +/* acpi_iomaps_lock must be held before calling */ +static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, + unsigned long size) +{ + struct acpi_iomap *map; + + map = __acpi_find_iomap(paddr, size); + if (map) { + kref_get(&map->ref); + return map->vaddr + (paddr - map->paddr); + } else + return NULL; +} + +/* + * Used to pre-map the specified IO memory area. First try to find + * whether the area is already pre-mapped, if it is, increase the + * reference count (in __acpi_try_ioremap) and return; otherwise, do + * the real ioremap, and add the mapping into acpi_iomaps list. 
+ */ +static void __iomem *acpi_pre_map(phys_addr_t paddr, + unsigned long size) +{ + void __iomem *vaddr; + struct acpi_iomap *map; + unsigned long pg_sz, flags; + phys_addr_t pg_off; + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + vaddr = __acpi_try_ioremap(paddr, size); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + if (vaddr) + return vaddr; + + pg_off = paddr & PAGE_MASK; + pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; + vaddr = ioremap(pg_off, pg_sz); + if (!vaddr) + return NULL; + map = kmalloc(sizeof(*map), GFP_KERNEL); + if (!map) + goto err_unmap; + INIT_LIST_HEAD(&map->list); + map->paddr = pg_off; + map->size = pg_sz; + map->vaddr = vaddr; + kref_init(&map->ref); + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + vaddr = __acpi_try_ioremap(paddr, size); + if (vaddr) { + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + iounmap(map->vaddr); + kfree(map); + return vaddr; + } + list_add_tail_rcu(&map->list, &acpi_iomaps); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + + return vaddr + (paddr - pg_off); +err_unmap: + iounmap(vaddr); + return NULL; +} + +/* acpi_iomaps_lock must be held before calling */ +static void __acpi_kref_del_iomap(struct kref *ref) +{ + struct acpi_iomap *map; + + map = container_of(ref, struct acpi_iomap, ref); + list_del_rcu(&map->list); +} + +/* + * Used to post-unmap the specified IO memory area. The iounmap is + * done only if the reference count goes zero. + */ +static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) +{ + struct acpi_iomap *map; + unsigned long flags; + int del; + + spin_lock_irqsave(&acpi_iomaps_lock, flags); + map = __acpi_find_iomap(paddr, size); + BUG_ON(!map); + del = kref_put(&map->ref, __acpi_kref_del_iomap); + spin_unlock_irqrestore(&acpi_iomaps_lock, flags); + + if (!del) + return; + + synchronize_rcu(); + iounmap(map->vaddr); + kfree(map); +} + +/* In NMI handler, should set silent = 1 */ +static int acpi_check_gar(struct acpi_generic_address *reg, + u64 *paddr, int silent) +{ + u32 width, space_id; + + width = reg->bit_width; + space_id = reg->space_id; + /* Handle possible alignment issues */ + memcpy(paddr, ®->address, sizeof(*paddr)); + if (!*paddr) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid physical address in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid bit width in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && + space_id != ACPI_ADR_SPACE_SYSTEM_IO) { + if (!silent) + pr_warning(FW_BUG ACPI_PFX + "Invalid address space type in GAR [0x%llx/%u/%u]\n", + *paddr, width, space_id); + return -EINVAL; + } + + return 0; +} + +/* Pre-map, working on GAR */ +int acpi_pre_map_gar(struct acpi_generic_address *reg) +{ + u64 paddr; + void __iomem *vaddr; + int rc; + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return 0; + + rc = acpi_check_gar(reg, &paddr, 0); + if (rc) + return rc; + + vaddr = acpi_pre_map(paddr, reg->bit_width / 8); + if (!vaddr) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_pre_map_gar); + +/* Post-unmap, working on GAR */ +int acpi_post_unmap_gar(struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) + return 0; + + rc = acpi_check_gar(reg, &paddr, 0); + if (rc) + return rc; + + acpi_post_unmap(paddr, reg->bit_width / 8); + + return 0; +} 
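/*
 * Editorial illustration, not part of the patch: a minimal sketch of the
 * intended calling pattern for the GAR helpers above.  The example_*
 * names and the static result variable are assumptions.  The mapping is
 * set up in process context so that the later read is NMI-safe:
 * acpi_atomic_read() only touches the pre-mapped area under the RCU read
 * lock and never calls ioremap().
 */
static u64 example_last_value;

static int example_init(struct acpi_generic_address *gar)
{
	/* process context: may allocate and ioremap() */
	return acpi_pre_map_gar(gar);
}

static void example_nmi_handler(struct acpi_generic_address *gar)
{
	u64 val;

	/* safe in NMI context; no printk here, printk is not NMI-safe */
	if (!acpi_atomic_read(&val, gar))
		example_last_value = val;
}

static void example_exit(struct acpi_generic_address *gar)
{
	/* drops the mapping reference; unmaps when it reaches zero */
	acpi_post_unmap_gar(gar);
}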
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); + +/* + * Can be used in atomic (including NMI) or process context. RCU read + * lock can only be released after the IO memory area accessing. + */ +static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) +{ + void __iomem *addr; + + rcu_read_lock(); + addr = __acpi_ioremap_fast(paddr, width); + switch (width) { + case 8: + *val = readb(addr); + break; + case 16: + *val = readw(addr); + break; + case 32: + *val = readl(addr); + break; + case 64: + *val = readq(addr); + break; + default: + return -EINVAL; + } + rcu_read_unlock(); + + return 0; +} + +static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) +{ + void __iomem *addr; + + rcu_read_lock(); + addr = __acpi_ioremap_fast(paddr, width); + switch (width) { + case 8: + writeb(val, addr); + break; + case 16: + writew(val, addr); + break; + case 32: + writel(val, addr); + break; + case 64: + writeq(val, addr); + break; + default: + return -EINVAL; + } + rcu_read_unlock(); + + return 0; +} + +/* GAR accessing in atomic (including NMI) or process context */ +int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + rc = acpi_check_gar(reg, &paddr, 1); + if (rc) + return rc; + + *val = 0; + switch (reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return acpi_atomic_read_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(acpi_atomic_read); + +int acpi_atomic_write(u64 val, struct acpi_generic_address *reg) +{ + u64 paddr; + int rc; + + rc = acpi_check_gar(reg, &paddr, 1); + if (rc) + return rc; + + switch (reg->space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + return acpi_atomic_write_mem(paddr, val, reg->bit_width); + case ACPI_ADR_SPACE_SYSTEM_IO: + return acpi_os_write_port(paddr, val, reg->bit_width); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(acpi_atomic_write); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f2234db85da0..e61d4f8e62a5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void) /* Don't trust ECDT, which comes from ASUSTek */ if (!EC_FLAGS_VALIDATE_ECDT) goto install; - saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); + saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) return -ENOMEM; - memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); /* fall through */ } diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c new file mode 100644 index 000000000000..d0c1967f7597 --- /dev/null +++ b/drivers/acpi/hed.c @@ -0,0 +1,112 @@ +/* + * ACPI Hardware Error Device (PNP0C33) Driver + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying + * + * ACPI Hardware Error Device is used to report some hardware errors + * notified via SCI, mainly the corrected errors. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct acpi_device_id acpi_hed_ids[] = { + {"PNP0C33", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, acpi_hed_ids); + +static acpi_handle hed_handle; + +static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list); + +int register_acpi_hed_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_hed_notify_list, nb); +} +EXPORT_SYMBOL_GPL(register_acpi_hed_notifier); + +void unregister_acpi_hed_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb); +} +EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier); + +/* + * SCI to report hardware error is forwarded to the listeners of HED, + * it is used by HEST Generic Hardware Error Source with notify type + * SCI. + */ +static void acpi_hed_notify(struct acpi_device *device, u32 event) +{ + blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL); +} + +static int __devinit acpi_hed_add(struct acpi_device *device) +{ + /* Only one hardware error device */ + if (hed_handle) + return -EINVAL; + hed_handle = device->handle; + return 0; +} + +static int __devexit acpi_hed_remove(struct acpi_device *device, int type) +{ + hed_handle = NULL; + return 0; +} + +static struct acpi_driver acpi_hed_driver = { + .name = "hardware_error_device", + .class = "hardware_error", + .ids = acpi_hed_ids, + .ops = { + .add = acpi_hed_add, + .remove = acpi_hed_remove, + .notify = acpi_hed_notify, + }, +}; + +static int __init acpi_hed_init(void) +{ + if (acpi_disabled) + return -ENODEV; + + if (acpi_bus_register_driver(&acpi_hed_driver) < 0) + return -ENODEV; + + return 0; +} + +static void __exit acpi_hed_exit(void) +{ + acpi_bus_unregister_driver(&acpi_hed_driver); +} + +module_init(acpi_hed_init); +module_exit(acpi_hed_exit); + +ACPI_MODULE_NAME("hed"); +MODULE_AUTHOR("Huang Ying"); +MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c deleted file mode 100644 index 1c527a192872..000000000000 --- a/drivers/acpi/hest.c +++ /dev/null @@ -1,139 +0,0 @@ -#include -#include - -#define PREFIX "ACPI: " - -static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p) -{ - return sizeof(*p) + - (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); -} - -static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p) -{ - return sizeof(*p) + - (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks); -} - -static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p) -{ - return sizeof(*p); -} - -static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p) -{ - return sizeof(*p); -} - -static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci) -{ - return (0 == pci_domain_nr(pci->bus) && - p->bus == pci->bus->number && - p->device == PCI_SLOT(pci->devfn) && - p->function == PCI_FUNC(pci->devfn)); -} - -static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first) -{ - struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header); - unsigned long rc=0; - u8 pcie_type = 0; - u8 bridge = 0; - switch (type) { - case 
ACPI_HEST_TYPE_AER_ROOT_PORT: - rc = sizeof(struct acpi_hest_aer_root); - pcie_type = PCI_EXP_TYPE_ROOT_PORT; - break; - case ACPI_HEST_TYPE_AER_ENDPOINT: - rc = sizeof(struct acpi_hest_aer); - pcie_type = PCI_EXP_TYPE_ENDPOINT; - break; - case ACPI_HEST_TYPE_AER_BRIDGE: - rc = sizeof(struct acpi_hest_aer_bridge); - if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE) - bridge = 1; - break; - } - - if (p->flags & ACPI_HEST_GLOBAL) { - if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge) - *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); - } - else - if (hest_match_pci(p, pci)) - *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); - return rc; -} - -static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci) -{ - struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader; - void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */ - struct acpi_hest_header *hdr = p; - - int i; - int firmware_first = 0; - static unsigned char printed_unused = 0; - static unsigned char printed_reserved = 0; - - for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) { - switch (hdr->type) { - case ACPI_HEST_TYPE_IA32_CHECK: - p += parse_acpi_hest_ia_machine_check(p); - break; - case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK: - p += parse_acpi_hest_ia_corrected(p); - break; - case ACPI_HEST_TYPE_IA32_NMI: - p += parse_acpi_hest_ia_nmi(p); - break; - /* These three should never appear */ - case ACPI_HEST_TYPE_NOT_USED3: - case ACPI_HEST_TYPE_NOT_USED4: - case ACPI_HEST_TYPE_NOT_USED5: - if (!printed_unused) { - printk(KERN_DEBUG PREFIX - "HEST Error Source list contains an obsolete type (%d).\n", hdr->type); - printed_unused = 1; - } - break; - case ACPI_HEST_TYPE_AER_ROOT_PORT: - case ACPI_HEST_TYPE_AER_ENDPOINT: - case ACPI_HEST_TYPE_AER_BRIDGE: - p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first); - break; - case ACPI_HEST_TYPE_GENERIC_ERROR: - p += parse_acpi_hest_generic(p); - break; - /* These should never appear either */ - case ACPI_HEST_TYPE_RESERVED: - default: - if (!printed_reserved) { - printk(KERN_DEBUG PREFIX - "HEST Error Source list contains a reserved type (%d).\n", hdr->type); - printed_reserved = 1; - } - break; - } - } - return firmware_first; -} - -int acpi_hest_firmware_first_pci(struct pci_dev *pci) -{ - acpi_status status = AE_NOT_FOUND; - struct acpi_table_header *hest = NULL; - - if (acpi_disabled) - return 0; - - status = acpi_get_table(ACPI_SIG_HEST, 1, &hest); - - if (ACPI_SUCCESS(status)) { - if (acpi_hest_firmware_first(hest, pci)) { - return 1; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 4bc1c4178f50..78418ce4fc78 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -1206,6 +1206,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n, } EXPORT_SYMBOL(acpi_check_mem_region); +/* + * Let drivers know whether the resource checks are effective + */ +int acpi_resources_are_enforced(void) +{ + return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; +} +EXPORT_SYMBOL(acpi_resources_are_enforced); + /* * Acquire a spinlock. 
* diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index aefce33f2a09..4eac59393edc 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus) struct acpi_pci_root *root; list_for_each_entry(root, &acpi_pci_roots, node) - if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) + if ((root->segment == (u16) seg) && + (root->secondary.start == (u16) bus)) return root->device->handle; return NULL; } @@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge); static acpi_status get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) { - int *busnr = data; + struct resource *res = data; struct acpi_resource_address64 address; if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && @@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) acpi_resource_to_address64(resource, &address); if ((address.address_length > 0) && - (address.resource_type == ACPI_BUS_NUMBER_RANGE)) - *busnr = address.minimum; + (address.resource_type == ACPI_BUS_NUMBER_RANGE)) { + res->start = address.minimum; + res->end = address.minimum + address.address_length - 1; + } return AE_OK; } static acpi_status try_get_root_bridge_busnr(acpi_handle handle, - unsigned long long *bus) + struct resource *res) { acpi_status status; - int busnum; - busnum = -1; + res->start = -1; status = acpi_walk_resources(handle, METHOD_NAME__CRS, - get_root_bridge_busnr_callback, &busnum); + get_root_bridge_busnr_callback, res); if (ACPI_FAILURE(status)) return status; - /* Check if we really get a bus number from _CRS */ - if (busnum == -1) + if (res->start == -1) return AE_ERROR; - *bus = busnum; return AE_OK; } @@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) struct acpi_device *child; u32 flags, base_flags; + root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); + if (!root) + return -ENOMEM; + segment = 0; status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, &segment); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); - return -ENODEV; + result = -ENODEV; + goto end; } /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ - bus = 0; - status = try_get_root_bridge_busnr(device->handle, &bus); + root->secondary.flags = IORESOURCE_BUS; + status = try_get_root_bridge_busnr(device->handle, &root->secondary); if (ACPI_FAILURE(status)) { + /* + * We need both the start and end of the downstream bus range + * to interpret _CBA (MMCONFIG base address), so it really is + * supposed to be in _CRS. If we don't find it there, all we + * can do is assume [_BBN-0xFF] or [0-0xFF]. 
+ */ + root->secondary.end = 0xFF; + printk(KERN_WARNING FW_BUG PREFIX + "no secondary bus range in _CRS\n"); status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - printk(KERN_ERR PREFIX - "no bus number in _CRS and can't evaluate _BBN\n"); - return -ENODEV; + if (ACPI_SUCCESS(status)) + root->secondary.start = bus; + else if (status == AE_NOT_FOUND) + root->secondary.start = 0; + else { + printk(KERN_ERR PREFIX "can't evaluate _BBN\n"); + result = -ENODEV; + goto end; } } - root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); - if (!root) - return -ENOMEM; - INIT_LIST_HEAD(&root->node); root->device = device; root->segment = segment & 0xFFFF; - root->bus_nr = bus & 0xFF; strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); device->driver_data = root; @@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) /* TBD: Locking */ list_add_tail(&root->node, &acpi_pci_roots); - printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", + printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n", acpi_device_name(device), acpi_device_bid(device), - root->segment, root->bus_nr); + root->segment, &root->secondary); /* * Scan the Root Bridge @@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) * PCI namespace does not get created until this call is made (and * thus the root bridge's pci_dev does not exist). */ - root->bus = pci_acpi_scan_root(device, segment, bus); + root->bus = pci_acpi_scan_root(root); if (!root->bus) { printk(KERN_ERR PREFIX "Bus %04x:%02x not present in PCI namespace\n", - root->segment, root->bus_nr); + root->segment, (unsigned int)root->secondary.start); result = -ENODEV; goto end; } diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 5675d9747e87..b1034a9ada4e 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) acpi_processor_get_limit_info(pr); - acpi_processor_power_init(pr, device); + if (cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr, device); pr->cdev = thermal_cooling_device_register("Processor", device, &processor_cooling_ops); @@ -920,9 +921,14 @@ static int __init acpi_processor_init(void) if (!acpi_processor_dir) return -ENOMEM; #endif - result = cpuidle_register_driver(&acpi_idle_driver); - if (result < 0) - goto out_proc; + + if (!cpuidle_register_driver(&acpi_idle_driver)) { + printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", + acpi_idle_driver.name); + } else { + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", + cpuidle_get_driver()->name); + } result = acpi_bus_register_driver(&acpi_processor_driver); if (result < 0) @@ -941,7 +947,6 @@ static int __init acpi_processor_init(void) out_cpuidle: cpuidle_unregister_driver(&acpi_idle_driver); -out_proc: #ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); #endif diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c3817e1f32c7..2e8c27d48f2b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset) break; } - if (pr->power.states[i].promotion.state) - seq_printf(seq, "promotion[C%zd] ", - (pr->power.states[i].promotion.state - - pr->power.states)); - else - 
seq_puts(seq, "promotion[--] "); - - if (pr->power.states[i].demotion.state) - seq_printf(seq, "demotion[C%zd] ", - (pr->power.states[i].demotion.state - - pr->power.states)); - else - seq_puts(seq, "demotion[--] "); + seq_puts(seq, "promotion[--] "); + + seq_puts(seq, "demotion[--] "); seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", pr->power.states[i].latency, @@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; + s64 idle_time_ns; s64 idle_time; s64 sleep_ticks = 0; @@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, return(acpi_idle_enter_c1(dev, state)); local_irq_disable(); + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, * NEED_RESCHED: */ smp_mb(); - } - if (unlikely(need_resched())) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return 0; + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } } /* @@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, sched_clock_idle_sleep_event(); acpi_idle_do_entry(cx); kt2 = ktime_get_real(); - idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1)); + idle_time = idle_time_ns; + do_div(idle_time, NSEC_PER_USEC); sleep_ticks = us_to_pm_timer_ticks(idle_time); /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); - current_thread_info()->status |= TS_POLLING; + if (cx->entry_method != ACPI_CSTATE_FFH) + current_thread_info()->status |= TS_POLLING; cx->usage++; @@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, struct acpi_processor *pr; struct acpi_processor_cx *cx = cpuidle_get_statedata(state); ktime_t kt1, kt2; + s64 idle_time_ns; s64 idle_time; s64 sleep_ticks = 0; @@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, } local_irq_disable(); + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, * NEED_RESCHED: */ smp_mb(); - } - if (unlikely(need_resched())) { - current_thread_info()->status |= TS_POLLING; - local_irq_enable(); - return 0; + if (unlikely(need_resched())) { + current_thread_info()->status |= TS_POLLING; + local_irq_enable(); + return 0; + } } acpi_unlazy_tlb(smp_processor_id()); @@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, spin_unlock(&c3_lock); } kt2 = ktime_get_real(); - idle_time = ktime_to_us(ktime_sub(kt2, kt1)); + idle_time_ns = ktime_to_us(ktime_sub(kt2, kt1)); + idle_time = idle_time_ns; + do_div(idle_time, NSEC_PER_USEC); sleep_ticks = us_to_pm_timer_ticks(idle_time); /* Tell the scheduler how much we idled: */ - sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); + sched_clock_idle_wakeup_event(idle_time_ns); local_irq_enable(); - current_thread_info()->status |= TS_POLLING; + if (cx->entry_method != ACPI_CSTATE_FFH) + current_thread_info()->status |= TS_POLLING; cx->usage++; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index baa76bbf244a..4ab2275b4461 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c 
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; -/* - * According to the ACPI specification the BIOS should make sure that ACPI is - * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still, - * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI - * on such systems during resume. Unfortunately that doesn't help in - * particularly pathological cases in which SCI_EN has to be set directly on - * resume, although the specification states very clearly that this flag is - * owned by the hardware. The set_sci_en_on_resume variable will be set in such - * cases. - */ -static bool set_sci_en_on_resume; - -void __init acpi_set_sci_en_on_resume(void) -{ - set_sci_en_on_resume = true; -} /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the @@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) break; } - /* If ACPI is not enabled by the BIOS, we need to enable it here. */ - if (set_sci_en_on_resume) - acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); - else - acpi_enable(); + /* This violates the spec but is required for bug compatibility. */ + acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers and execute _BFS */ acpi_leave_sleep_state_prep(acpi_state); @@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) return 0; } -static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d) -{ - set_sci_en_on_resume = true; - return 0; -} - static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, @@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { - .callback = init_set_sci_en_on_resume, - .ident = "Apple MacBook 1,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Apple MacMini 1,1", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"), - }, - }, - { .callback = init_old_suspend_ordering, .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", .matches = { @@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, }, { - .callback = init_set_sci_en_on_resume, - .ident = "Toshiba Satellite L300", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard HP G7000 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Pavilion dv4", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Pavilion dv7", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = 
"Hewlett-Packard Compaq Presario C700 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad T410", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad T510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad W510", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Lenovo ThinkPad X201[s]", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"), - }, - }, - { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { @@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1558", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1557", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"), - }, - }, - { - .callback = init_set_sci_en_on_resume, - .ident = "Dell Studio 1555", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"), - }, - }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 8a8f3b3382a6..25b8bd149284 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -1,6 +1,6 @@ extern u8 sleep_states[]; -extern int acpi_suspend (u32 state); +extern int acpi_suspend(u32 state); extern void acpi_enable_wakeup_device_prep(u8 sleep_state); extern void acpi_enable_wakeup_device(u8 sleep_state); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8a0ed2800e63..f336bca7c450 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id, unsigned long table_end; acpi_size tbl_size; - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return -ENODEV; if (!handler) @@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler) struct acpi_table_header *table = NULL; acpi_size tbl_size; - if (acpi_disabled && !acpi_ht) + if (acpi_disabled) return -ENODEV; if (!handler) diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a0c93b321482..9865d46f49a8 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -45,6 +45,7 @@ #include #include #include +#include #define PREFIX "ACPI: " @@ -65,11 +66,6 @@ #define MAX_NAME_LEN 20 -#define ACPI_VIDEO_DISPLAY_CRT 1 -#define ACPI_VIDEO_DISPLAY_TV 2 -#define ACPI_VIDEO_DISPLAY_DVI 3 -#define ACPI_VIDEO_DISPLAY_LCD 4 - #define _COMPONENT ACPI_VIDEO_COMPONENT ACPI_MODULE_NAME("video"); @@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct 
acpi_video_device *device) result = acpi_video_init_brightness(device); if (result) return; - name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; + count++; - sprintf(name, "acpi_video%d", count++); memset(&props, 0, sizeof(struct backlight_properties)); props.max_brightness = device->brightness->count - 3; device->backlight = backlight_device_register(name, NULL, device, @@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) if (device->cap._DCS && device->cap._DSS) { static int count; char *name; - name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, "acpi_video%d", count); if (!name) return; - sprintf(name, "acpi_video%d", count++); + count++; device->output_dev = video_output_register(name, NULL, device, &acpi_output_properties); kfree(name); @@ -1747,12 +1743,28 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id return NULL; } +static int +acpi_video_get_device_type(struct acpi_video_bus *video, + unsigned long device_id) +{ + struct acpi_video_enumerated_device *ids; + int i; + + for (i = 0; i < video->attached_count; i++) { + ids = &video->attached_array[i]; + if ((ids->value.int_val & 0xffff) == device_id) + return ids->value.int_val; + } + + return 0; +} + static int acpi_video_bus_get_one_device(struct acpi_device *device, struct acpi_video_bus *video) { unsigned long long device_id; - int status; + int status, device_type; struct acpi_video_device *data; struct acpi_video_device_attrib* attribute; @@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device, } if(attribute->bios_can_detect) data->flags.bios = 1; - } else - data->flags.unknown = 1; + } else { + /* Check for legacy IDs */ + device_type = acpi_video_get_device_type(video, + device_id); + /* Ignore bits 16 and 18-20 */ + switch (device_type & 0xffe2ffff) { + case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR: + data->flags.crt = 1; + break; + case ACPI_VIDEO_DISPLAY_LEGACY_PANEL: + data->flags.lcd = 1; + break; + case ACPI_VIDEO_DISPLAY_LEGACY_TV: + data->flags.tvout = 1; + break; + default: + data->flags.unknown = 1; + } + } acpi_video_device_bind(video, data); acpi_video_device_find_cap(data); @@ -2032,6 +2061,71 @@ out: return result; } +int acpi_video_get_edid(struct acpi_device *device, int type, int device_id, + void **edid) +{ + struct acpi_video_bus *video; + struct acpi_video_device *video_device; + union acpi_object *buffer = NULL; + acpi_status status; + int i, length; + + if (!device || !acpi_driver_data(device)) + return -EINVAL; + + video = acpi_driver_data(device); + + for (i = 0; i < video->attached_count; i++) { + video_device = video->attached_array[i].bind_info; + length = 256; + + if (!video_device) + continue; + + if (type) { + switch (type) { + case ACPI_VIDEO_DISPLAY_CRT: + if (!video_device->flags.crt) + continue; + break; + case ACPI_VIDEO_DISPLAY_TV: + if (!video_device->flags.tvout) + continue; + break; + case ACPI_VIDEO_DISPLAY_DVI: + if (!video_device->flags.dvi) + continue; + break; + case ACPI_VIDEO_DISPLAY_LCD: + if (!video_device->flags.lcd) + continue; + break; + } + } else if (video_device->device_id != device_id) { + continue; + } + + status = acpi_video_device_EDID(video_device, &buffer, length); + + if (ACPI_FAILURE(status) || !buffer || + buffer->type != ACPI_TYPE_BUFFER) { + length = 128; + status = acpi_video_device_EDID(video_device, &buffer, + length); + if (ACPI_FAILURE(status) || !buffer || + buffer->type != 
ACPI_TYPE_BUFFER) { + continue; + } + } + + *edid = buffer->buffer.pointer; + return length; + } + + return -ENODEV; +} +EXPORT_SYMBOL(acpi_video_get_edid); + static int acpi_video_bus_get_devices(struct acpi_video_bus *video, struct acpi_device *device) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index fc2f26b9b407..c5fef01b3c95 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str) ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; if (!strcmp("video", str)) acpi_video_support |= - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; } return 1; } diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index e68541f662b9..73f883333a0d 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -57,6 +57,8 @@ config SATA_PMP This option adds support for SATA Port Multipliers (the SATA version of an ethernet hub, or SAS expander). +comment "Controllers with non-SFF native interface" + config SATA_AHCI tristate "AHCI SATA support" depends on PCI @@ -73,11 +75,12 @@ config SATA_AHCI_PLATFORM If unsure, say N. -config SATA_SIL24 - tristate "Silicon Image 3124/3132 SATA support" - depends on PCI +config SATA_FSL + tristate "Freescale 3.0Gbps SATA support" + depends on FSL_SOC help - This option enables support for Silicon Image 3124/3132 Serial ATA. + This option enables support for Freescale 3.0Gbps SATA controller. + It can be found on MPC837x and MPC8315. If unsure, say N. @@ -87,12 +90,11 @@ config SATA_INIC162X help This option enables support for Initio 162x Serial ATA. -config SATA_FSL - tristate "Freescale 3.0Gbps SATA support" - depends on FSL_SOC +config SATA_SIL24 + tristate "Silicon Image 3124/3132 SATA support" + depends on PCI help - This option enables support for Freescale 3.0Gbps SATA controller. - It can be found on MPC837x and MPC8315. + This option enables support for Silicon Image 3124/3132 Serial ATA. If unsure, say N. @@ -116,15 +118,65 @@ config ATA_SFF if ATA_SFF -config SATA_SVW - tristate "ServerWorks Frodo / Apple K2 SATA support" +comment "SFF controllers with custom DMA interface" + +config PDC_ADMA + tristate "Pacific Digital ADMA support" depends on PCI help - This option enables support for Broadcom/Serverworks/Apple K2 - SATA support. + This option enables support for Pacific Digital ADMA controllers + + If unsure, say N. + +config PATA_MPC52xx + tristate "Freescale MPC52xx SoC internal IDE" + depends on PPC_MPC52xx && PPC_BESTCOMM + select PPC_BESTCOMM_ATA + help + This option enables support for integrated IDE controller + of the Freescale MPC52xx SoC. + + If unsure, say N. + +config PATA_OCTEON_CF + tristate "OCTEON Boot Bus Compact Flash support" + depends on CPU_CAVIUM_OCTEON + help + This option enables a polled compact flash driver for use with + compact flash cards attached to the OCTEON boot bus. + + If unsure, say N. + +config SATA_QSTOR + tristate "Pacific Digital SATA QStor support" + depends on PCI + help + This option enables support for Pacific Digital Serial ATA QStor. + + If unsure, say N. + +config SATA_SX4 + tristate "Promise SATA SX4 support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for Promise Serial ATA SX4. If unsure, say N. +config ATA_BMDMA + bool "ATA BMDMA support" + default y + help + This option adds support for SFF ATA controllers with BMDMA + capability. BMDMA stands for bus-master DMA and the + de-facto DMA interface for SFF controllers. + + If unuser, say Y. 
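
[Illustrative sketch, not part of the patch: stepping back to the drivers/acpi/video.c hunk above, a minimal caller of the newly exported acpi_video_get_edid() helper. It assumes the declaration and the ACPI_VIDEO_DISPLAY_* constants are picked up via <acpi/video.h>; example_fetch_lcd_edid() is a made-up name.]

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/video.h>

static int example_fetch_lcd_edid(struct acpi_device *acpi_dev)
{
        void *edid;
        int len;

        /* type-based lookup: first attached LCD output; device_id is ignored */
        len = acpi_video_get_edid(acpi_dev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
        if (len < 0)
                return len;     /* -ENODEV if no LCD output exposes an EDID */

        /* len is 128 or 256; edid points at the payload of the ACPI buffer */
        pr_info("ACPI EDID: %d bytes\n", len);
        return 0;
}
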
+ +if ATA_BMDMA + +comment "SATA SFF controllers with BMDMA" + config ATA_PIIX tristate "Intel ESB, ICH, PIIX3, PIIX4 PATA/SATA support" depends on PCI @@ -152,22 +204,6 @@ config SATA_NV If unsure, say N. -config PDC_ADMA - tristate "Pacific Digital ADMA support" - depends on PCI - help - This option enables support for Pacific Digital ADMA controllers - - If unsure, say N. - -config SATA_QSTOR - tristate "Pacific Digital SATA QStor support" - depends on PCI - help - This option enables support for Pacific Digital Serial ATA QStor. - - If unsure, say N. - config SATA_PROMISE tristate "Promise SATA TX2/TX4 support" depends on PCI @@ -176,14 +212,6 @@ config SATA_PROMISE If unsure, say N. -config SATA_SX4 - tristate "Promise SATA SX4 support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for Promise Serial ATA SX4. - - If unsure, say N. - config SATA_SIL tristate "Silicon Image SATA support" depends on PCI @@ -203,6 +231,15 @@ config SATA_SIS enable the PATA_SIS driver in the config. If unsure, say N. +config SATA_SVW + tristate "ServerWorks Frodo / Apple K2 SATA support" + depends on PCI + help + This option enables support for Broadcom/Serverworks/Apple K2 + SATA support. + + If unsure, say N. + config SATA_ULI tristate "ULi Electronics SATA support" depends on PCI @@ -227,14 +264,7 @@ config SATA_VITESSE If unsure, say N. -config PATA_ACPI - tristate "ACPI firmware driver for PATA" - depends on ATA_ACPI - help - This option enables an ACPI method driver which drives - motherboard PATA controller interfaces through the ACPI - firmware in the BIOS. This driver can sometimes handle - otherwise unsupported hardware. +comment "PATA SFF controllers with BMDMA" config PATA_ALI tristate "ALi PATA support" @@ -262,40 +292,30 @@ config PATA_ARTOP If unsure, say N. -config PATA_ATP867X - tristate "ARTOP/Acard ATP867X PATA support" +config PATA_ATIIXP + tristate "ATI PATA support" depends on PCI help - This option enables support for ARTOP/Acard ATP867X PATA - controllers. - - If unsure, say N. - -config PATA_AT32 - tristate "Atmel AVR32 PATA support (Experimental)" - depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL - help - This option enables support for the IDE devices on the - Atmel AT32AP platform. + This option enables support for the ATI ATA interfaces + found on the many ATI chipsets. If unsure, say N. -config PATA_ATIIXP - tristate "ATI PATA support" +config PATA_ATP867X + tristate "ARTOP/Acard ATP867X PATA support" depends on PCI help - This option enables support for the ATI ATA interfaces - found on the many ATI chipsets. + This option enables support for ARTOP/Acard ATP867X PATA + controllers. If unsure, say N. -config PATA_CMD640_PCI - tristate "CMD640 PCI PATA support (Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_BF54X + tristate "Blackfin 54x ATAPI support" + depends on BF542 || BF548 || BF549 help - This option enables support for the CMD640 PCI IDE - interface chip. Only the primary channel is currently - supported. + This option enables support for the built-in ATAPI controller on + Blackfin 54x family chips. If unsure, say N. @@ -362,15 +382,6 @@ config PATA_EFAR If unsure, say N. -config ATA_GENERIC - tristate "Generic ATA support" - depends on PCI - help - This option enables support for generic BIOS configured - ATA controllers via the new ATA layer - - If unsure, say N. - config PATA_HPT366 tristate "HPT 366/368 PATA support" depends on PCI @@ -415,12 +426,20 @@ config PATA_HPT3X3_DMA controllers. 
Enable with care as there are still some problems with DMA on this chipset. -config PATA_ISAPNP - tristate "ISA Plug and Play PATA support" - depends on ISAPNP +config PATA_ICSIDE + tristate "Acorn ICS PATA support" + depends on ARM && ARCH_ACORN help - This option enables support for ISA plug & play ATA - controllers such as those found on old soundcards. + On Acorn systems, say Y here if you wish to use the ICS PATA + interface card. This is not required for ICS partition support. + If you are unsure, say N to this. + +config PATA_IT8213 + tristate "IT8213 PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the ITE 821 PATA + controllers via the new ATA layer. If unsure, say N. @@ -434,15 +453,6 @@ config PATA_IT821X If unsure, say N. -config PATA_IT8213 - tristate "IT8213 PATA support (Experimental)" - depends on PCI && EXPERIMENTAL - help - This option enables support for the ITE 821 PATA - controllers via the new ATA layer. - - If unsure, say N. - config PATA_JMICRON tristate "JMicron PATA support" depends on PCI @@ -452,23 +462,14 @@ config PATA_JMICRON If unsure, say N. -config PATA_LEGACY - tristate "Legacy ISA PATA support (Experimental)" - depends on (ISA || PCI) && EXPERIMENTAL - help - This option enables support for ISA/VLB/PCI bus legacy PATA - ports and allows them to be accessed via the new ATA layer. - - If unsure, say N. - -config PATA_TRIFLEX - tristate "Compaq Triflex PATA support" - depends on PCI +config PATA_MACIO + tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" + depends on PPC_PMAC help - Enable support for the Compaq 'Triflex' IDE controller as found - on many Compaq Pentium-Pro systems, via the new ATA layer. - - If unsure, say N. + Most IDE capable PowerMacs have IDE busses driven by a variant + of this controller which is part of the Apple chipset used on + most PowerMac models. Some models have multiple busses using + different chipsets, though generally, MacIO is one of them. config PATA_MARVELL tristate "Marvell PATA support via legacy mode" @@ -481,32 +482,6 @@ config PATA_MARVELL If unsure, say N. -config PATA_MPC52xx - tristate "Freescale MPC52xx SoC internal IDE" - depends on PPC_MPC52xx && PPC_BESTCOMM - select PPC_BESTCOMM_ATA - help - This option enables support for integrated IDE controller - of the Freescale MPC52xx SoC. - - If unsure, say N. - -config PATA_MPIIX - tristate "Intel PATA MPIIX support" - depends on PCI - help - This option enables support for MPIIX PATA support. - - If unsure, say N. - -config PATA_OLDPIIX - tristate "Intel PATA old PIIX support" - depends on PCI - help - This option enables support for early PIIX PATA support. - - If unsure, say N. - config PATA_NETCELL tristate "NETCELL Revolution RAID support" depends on PCI @@ -525,15 +500,6 @@ config PATA_NINJA32 If unsure, say N. -config PATA_NS87410 - tristate "Nat Semi NS87410 PATA support" - depends on PCI - help - This option enables support for the National Semiconductor - NS87410 PCI-IDE controller. - - If unsure, say N. - config PATA_NS87415 tristate "Nat Semi NS87415 PATA support" depends on PCI @@ -543,12 +509,11 @@ config PATA_NS87415 If unsure, say N. -config PATA_OPTI - tristate "OPTI621/6215 PATA support (Very Experimental)" - depends on PCI && EXPERIMENTAL +config PATA_OLDPIIX + tristate "Intel PATA old PIIX support" + depends on PCI help - This option enables full PIO support for the early Opti ATA - controllers found on some old motherboards. + This option enables support for early PIIX PATA support. 
If unsure, say N. @@ -562,24 +527,6 @@ config PATA_OPTIDMA If unsure, say N. -config PATA_PALMLD - tristate "Palm LifeDrive PATA support" - depends on MACH_PALMLD - help - This option enables support for Palm LifeDrive's internal ATA - port via the new ATA layer. - - If unsure, say N. - -config PATA_PCMCIA - tristate "PCMCIA PATA support" - depends on PCMCIA - help - This option enables support for PCMCIA ATA interfaces, including - compact flash card adapters via the new ATA layer. - - If unsure, say N. - config PATA_PDC2027X tristate "Promise PATA 2027x support" depends on PCI @@ -597,12 +544,6 @@ config PATA_PDC_OLD If unsure, say N. -config PATA_QDI - tristate "QDI VLB PATA support" - depends on ISA - help - Support for QDI 6500 and 6580 PATA controllers on VESA local bus. - config PATA_RADISYS tristate "RADISYS 82600 PATA support (Experimental)" depends on PCI && EXPERIMENTAL @@ -612,15 +553,6 @@ config PATA_RADISYS If unsure, say N. -config PATA_RB532 - tristate "RouterBoard 532 PATA CompactFlash support" - depends on MIKROTIK_RB532 - help - This option enables support for the RouterBoard 532 - PATA CompactFlash controller. - - If unsure, say N. - config PATA_RDC tristate "RDC PATA support" depends on PCI @@ -631,21 +563,30 @@ config PATA_RDC If unsure, say N. -config PATA_RZ1000 - tristate "PC Tech RZ1000 PATA support" +config PATA_SC1200 + tristate "SC1200 PATA support" depends on PCI help - This option enables basic support for the PC Tech RZ1000/1 - PATA controllers via the new ATA layer + This option enables support for the NatSemi/AMD SC1200 SoC + companion chip used with the Geode processor family. If unsure, say N. -config PATA_SC1200 - tristate "SC1200 PATA support" +config PATA_SCC + tristate "Toshiba's Cell Reference Set IDE support" + depends on PCI && PPC_CELLEB + help + This option enables support for the built-in IDE controller on + Toshiba Cell Reference Board. + + If unsure, say N. + +config PATA_SCH + tristate "Intel SCH PATA support" depends on PCI help - This option enables support for the NatSemi/AMD SC1200 SoC - companion chip used with the Geode processor family. + This option enables support for Intel SCH PATA on the Intel + SCH (US15W, US15L, UL11L) series host controllers. If unsure, say N. @@ -683,6 +624,15 @@ config PATA_TOSHIBA If unsure, say N. +config PATA_TRIFLEX + tristate "Compaq Triflex PATA support" + depends on PCI + help + Enable support for the Compaq 'Triflex' IDE controller as found + on many Compaq Pentium-Pro systems, via the new ATA layer. + + If unsure, say N. + config PATA_VIA tristate "VIA PATA support" depends on PCI @@ -701,12 +651,99 @@ config PATA_WINBOND If unsure, say N. -config PATA_WINBOND_VLB - tristate "Winbond W83759A VLB PATA support (Experimental)" - depends on ISA && EXPERIMENTAL +endif # ATA_BMDMA + +comment "PIO-only SFF controllers" + +config PATA_AT32 + tristate "Atmel AVR32 PATA support (Experimental)" + depends on AVR32 && PLATFORM_AT32AP && EXPERIMENTAL help - Support for the Winbond W83759A controller on Vesa Local Bus - systems. + This option enables support for the IDE devices on the + Atmel AT32AP platform. + + If unsure, say N. + +config PATA_AT91 + tristate "PATA support for AT91SAM9260" + depends on ARM && ARCH_AT91 + help + This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. + + If unsure, say N. + +config PATA_CMD640_PCI + tristate "CMD640 PCI PATA support (Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables support for the CMD640 PCI IDE + interface chip. 
Only the primary channel is currently + supported. + + If unsure, say N. + +config PATA_ISAPNP + tristate "ISA Plug and Play PATA support" + depends on ISAPNP + help + This option enables support for ISA plug & play ATA + controllers such as those found on old soundcards. + + If unsure, say N. + +config PATA_IXP4XX_CF + tristate "IXP4XX Compact Flash support" + depends on ARCH_IXP4XX + help + This option enables support for a Compact Flash connected on + the ixp4xx expansion bus. This driver had been written for + Loft/Avila boards in mind but can work with others. + + If unsure, say N. + +config PATA_MPIIX + tristate "Intel PATA MPIIX support" + depends on PCI + help + This option enables support for MPIIX PATA support. + + If unsure, say N. + +config PATA_NS87410 + tristate "Nat Semi NS87410 PATA support" + depends on PCI + help + This option enables support for the National Semiconductor + NS87410 PCI-IDE controller. + + If unsure, say N. + +config PATA_OPTI + tristate "OPTI621/6215 PATA support (Very Experimental)" + depends on PCI && EXPERIMENTAL + help + This option enables full PIO support for the early Opti ATA + controllers found on some old motherboards. + + If unsure, say N. + +config PATA_PALMLD + tristate "Palm LifeDrive PATA support" + depends on MACH_PALMLD + help + This option enables support for Palm LifeDrive's internal ATA + port via the new ATA layer. + + If unsure, say N. + +config PATA_PCMCIA + tristate "PCMCIA PATA support" + depends on PCMCIA + help + This option enables support for PCMCIA ATA interfaces, including + compact flash card adapters via the new ATA layer. + + If unsure, say N. config HAVE_PATA_PLATFORM bool @@ -725,14 +762,6 @@ config PATA_PLATFORM If unsure, say N. -config PATA_AT91 - tristate "PATA support for AT91SAM9260" - depends on ARM && ARCH_AT91 - help - This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. - - If unsure, say N. - config PATA_OF_PLATFORM tristate "OpenFirmware platform device PATA support" depends on PATA_PLATFORM && PPC_OF @@ -743,69 +772,65 @@ config PATA_OF_PLATFORM If unsure, say N. -config PATA_ICSIDE - tristate "Acorn ICS PATA support" - depends on ARM && ARCH_ACORN +config PATA_QDI + tristate "QDI VLB PATA support" + depends on ISA help - On Acorn systems, say Y here if you wish to use the ICS PATA - interface card. This is not required for ICS partition support. - If you are unsure, say N to this. + Support for QDI 6500 and 6580 PATA controllers on VESA local bus. -config PATA_IXP4XX_CF - tristate "IXP4XX Compact Flash support" - depends on ARCH_IXP4XX +config PATA_RB532 + tristate "RouterBoard 532 PATA CompactFlash support" + depends on MIKROTIK_RB532 help - This option enables support for a Compact Flash connected on - the ixp4xx expansion bus. This driver had been written for - Loft/Avila boards in mind but can work with others. + This option enables support for the RouterBoard 532 + PATA CompactFlash controller. If unsure, say N. -config PATA_OCTEON_CF - tristate "OCTEON Boot Bus Compact Flash support" - depends on CPU_CAVIUM_OCTEON +config PATA_RZ1000 + tristate "PC Tech RZ1000 PATA support" + depends on PCI help - This option enables a polled compact flash driver for use with - compact flash cards attached to the OCTEON boot bus. + This option enables basic support for the PC Tech RZ1000/1 + PATA controllers via the new ATA layer If unsure, say N. 
-config PATA_SCC - tristate "Toshiba's Cell Reference Set IDE support" - depends on PCI && PPC_CELLEB +config PATA_WINBOND_VLB + tristate "Winbond W83759A VLB PATA support (Experimental)" + depends on ISA && EXPERIMENTAL help - This option enables support for the built-in IDE controller on - Toshiba Cell Reference Board. + Support for the Winbond W83759A controller on Vesa Local Bus + systems. - If unsure, say N. +comment "Generic fallback / legacy drivers" -config PATA_SCH - tristate "Intel SCH PATA support" - depends on PCI +config PATA_ACPI + tristate "ACPI firmware driver for PATA" + depends on ATA_ACPI && ATA_BMDMA help - This option enables support for Intel SCH PATA on the Intel - SCH (US15W, US15L, UL11L) series host controllers. - - If unsure, say N. + This option enables an ACPI method driver which drives + motherboard PATA controller interfaces through the ACPI + firmware in the BIOS. This driver can sometimes handle + otherwise unsupported hardware. -config PATA_BF54X - tristate "Blackfin 54x ATAPI support" - depends on BF542 || BF548 || BF549 +config ATA_GENERIC + tristate "Generic ATA support" + depends on PCI && ATA_BMDMA help - This option enables support for the built-in ATAPI controller on - Blackfin 54x family chips. + This option enables support for generic BIOS configured + ATA controllers via the new ATA layer If unsure, say N. -config PATA_MACIO - tristate "Apple PowerMac/PowerBook internal 'MacIO' IDE" - depends on PPC_PMAC +config PATA_LEGACY + tristate "Legacy ISA PATA support (Experimental)" + depends on (ISA || PCI) && EXPERIMENTAL help - Most IDE capable PowerMacs have IDE busses driven by a variant - of this controller which is part of the Apple chipset used on - most PowerMac models. Some models have multiple busses using - different chipsets, though generally, MacIO is one of them. + This option enables support for ISA/VLB/PCI bus legacy PATA + ports and allows them to be accessed via the new ATA layer. + If unsure, say N. 
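
[Illustrative sketch, not part of the patch: the Kconfig reshuffle above gates the bus-master paths behind ATA_BMDMA (note PATA_ACPI and ATA_GENERIC now depend on it), and BMDMA-capable SFF drivers probe through the BMDMA helpers introduced later in this diff. Only the foo_* names and "pata_foo" are made up.]

#include <linux/pci.h>
#include <linux/libata.h>

static struct scsi_host_template foo_sht = {
        ATA_BMDMA_SHT("pata_foo"),
};

static struct ata_port_operations foo_port_ops = {
        .inherits       = &ata_bmdma_port_ops,
        .cable_detect   = ata_cable_40wire,
};

static const struct ata_port_info foo_port_info = {
        .flags          = ATA_FLAG_SLAVE_POSS,
        .pio_mask       = ATA_PIO4,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA5,
        .port_ops       = &foo_port_ops,
};

static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &foo_port_info, NULL };

        /* enable the device, map BARs, set up BMDMA and request the IRQ */
        return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht, NULL, 0);
}

A PIO-only controller would instead inherit from ata_sff_port_ops and keep calling ata_pci_sff_init_one(), which after this series no longer touches the BMDMA registers.
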
endif # ATA_SFF endif # ATA diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d0a93c4ad3ec..7ef89d73df63 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -1,33 +1,39 @@ obj-$(CONFIG_ATA) += libata.o +# non-SFF interface obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o -obj-$(CONFIG_SATA_SVW) += sata_svw.o +obj-$(CONFIG_SATA_FSL) += sata_fsl.o +obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o +obj-$(CONFIG_SATA_SIL24) += sata_sil24.o + +# SFF w/ custom DMA +obj-$(CONFIG_PDC_ADMA) += pdc_adma.o +obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o +obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o +obj-$(CONFIG_SATA_SX4) += sata_sx4.o + +# SFF SATA w/ BMDMA obj-$(CONFIG_ATA_PIIX) += ata_piix.o +obj-$(CONFIG_SATA_MV) += sata_mv.o +obj-$(CONFIG_SATA_NV) += sata_nv.o obj-$(CONFIG_SATA_PROMISE) += sata_promise.o -obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o obj-$(CONFIG_SATA_SIL) += sata_sil.o -obj-$(CONFIG_SATA_SIL24) += sata_sil24.o -obj-$(CONFIG_SATA_VIA) += sata_via.o -obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o obj-$(CONFIG_SATA_SIS) += sata_sis.o -obj-$(CONFIG_SATA_SX4) += sata_sx4.o -obj-$(CONFIG_SATA_NV) += sata_nv.o +obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o -obj-$(CONFIG_SATA_MV) += sata_mv.o -obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o -obj-$(CONFIG_PDC_ADMA) += pdc_adma.o -obj-$(CONFIG_SATA_FSL) += sata_fsl.o -obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_SATA_VIA) += sata_via.o +obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o +# SFF PATA w/ BMDMA obj-$(CONFIG_PATA_ALI) += pata_ali.o obj-$(CONFIG_PATA_AMD) += pata_amd.o obj-$(CONFIG_PATA_ARTOP) += pata_artop.o -obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o -obj-$(CONFIG_PATA_AT32) += pata_at32.o obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o -obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ATP867X) += pata_atp867x.o +obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o @@ -39,47 +45,50 @@ obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o -obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o -obj-$(CONFIG_PATA_IT821X) += pata_it821x.o +obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o obj-$(CONFIG_PATA_IT8213) += pata_it8213.o +obj-$(CONFIG_PATA_IT821X) += pata_it821x.o obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o +obj-$(CONFIG_PATA_MACIO) += pata_macio.o +obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o -obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o -obj-$(CONFIG_PATA_OPTI) += pata_opti.o -obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o -obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o -obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o -obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o -obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o -obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o -obj-$(CONFIG_PATA_QDI) += pata_qdi.o obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o -obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o obj-$(CONFIG_PATA_RDC) += pata_rdc.o -obj-$(CONFIG_PATA_RZ1000) += 
pata_rz1000.o obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o +obj-$(CONFIG_PATA_SCC) += pata_scc.o +obj-$(CONFIG_PATA_SCH) += pata_sch.o obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o obj-$(CONFIG_PATA_SIL680) += pata_sil680.o +obj-$(CONFIG_PATA_SIS) += pata_sis.o obj-$(CONFIG_PATA_TOSHIBA) += pata_piccolo.o +obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o obj-$(CONFIG_PATA_VIA) += pata_via.o obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o -obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o -obj-$(CONFIG_PATA_SIS) += pata_sis.o -obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o + +# SFF PIO only +obj-$(CONFIG_PATA_AT32) += pata_at32.o +obj-$(CONFIG_PATA_AT91) += pata_at91.o +obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o +obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o -obj-$(CONFIG_PATA_SCC) += pata_scc.o -obj-$(CONFIG_PATA_SCH) += pata_sch.o -obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o -obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o +obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o +obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o +obj-$(CONFIG_PATA_OPTI) += pata_opti.o +obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o +obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o -obj-$(CONFIG_PATA_AT91) += pata_at91.o obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o -obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o +obj-$(CONFIG_PATA_QDI) += pata_qdi.o +obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o +obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o +obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o + # Should be last but two libata driver obj-$(CONFIG_PATA_ACPI) += pata_acpi.o # Should be last but one libata driver diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index 33fb614f9784..573158a9668d 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -155,7 +155,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id return rc; pcim_pin_device(dev); } - return ata_pci_sff_init_one(dev, ppi, &generic_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0); } static struct pci_device_id ata_generic[] = { diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ec52fc618763..7409f98d2ae6 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -1589,7 +1589,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, hpriv->map = piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -1626,7 +1626,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); } static void piix_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index c47373f01f89..06b7e49e039c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -160,6 +160,10 @@ int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); +static int atapi_an; +module_param(atapi_an, int, 0444); +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); + MODULE_AUTHOR("Jeff Garzik"); 
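
[Illustrative sketch, not part of the patch: the ata_piix hunk above shows the two-step conversion pattern for drivers that must adjust the host between allocation and activation, using ata_pci_bmdma_prepare_host() followed by ata_pci_sff_activate_host() with the new ata_bmdma_interrupt handler. It reuses the made-up foo_port_info and foo_sht from the previous sketch.]

static int foo_init_one_twostep(struct pci_dev *pdev,
                                const struct pci_device_id *id)
{
        const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
        struct ata_host *host;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* allocate the host, map BARs and wire up the BMDMA resources */
        rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
        if (rc)
                return rc;

        /* anything that must happen before the IRQ is requested goes here */
        host->flags |= ATA_HOST_PARALLEL_SCAN;

        pci_set_master(pdev);
        return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &foo_sht);
}
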
MODULE_DESCRIPTION("Library module for ATA devices"); MODULE_LICENSE("GPL"); @@ -2122,6 +2126,14 @@ retry: goto err_out; } + if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " + "class=%d may_fallback=%d tried_spinup=%d\n", + class, may_fallback, tried_spinup); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); + } + /* Falling back doesn't make sense if ID data was read * successfully at least once. */ @@ -2510,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev) * to enable ATAPI AN to discern between PHY status * changed notifications and ATAPI ANs. */ - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && (!sata_pmp_attached(ap) || sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { unsigned int err_mask; @@ -6372,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur, { "3.0Gbps", .spd_limit = 2 }, { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, + { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 19ddf924944f..efa4a18cfb9d 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -63,7 +63,6 @@ const struct ata_port_operations ata_sff_port_ops = { .sff_tf_read = ata_sff_tf_read, .sff_exec_command = ata_sff_exec_command, .sff_data_xfer = ata_sff_data_xfer, - .sff_irq_clear = ata_sff_irq_clear, .sff_drain_fifo = ata_sff_drain_fifo, .lost_interrupt = ata_sff_lost_interrupt, @@ -395,32 +394,11 @@ void ata_sff_irq_on(struct ata_port *ap) ata_sff_set_devctl(ap, ap->ctl); ata_wait_idle(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_irq_on); -/** - * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. - * @ap: Port associated with this ATA transaction. - * - * Clear interrupt and error flags in DMA status register. - * - * May be used as the irq_clear() entry in ata_port_operations. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ -void ata_sff_irq_clear(struct ata_port *ap) -{ - void __iomem *mmio = ap->ioaddr.bmdma_addr; - - if (!mmio) - return; - - iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); -} -EXPORT_SYMBOL_GPL(ata_sff_irq_clear); - /** * ata_sff_tf_load - send taskfile registers to host controller * @ap: Port to which output is sent @@ -820,11 +798,15 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_LAST; break; +#ifdef CONFIG_ATA_BMDMA case ATAPI_PROT_DMA: ap->hsm_task_state = HSM_ST_LAST; /* initiate bmdma */ ap->ops->bmdma_start(qc); break; +#endif /* CONFIG_ATA_BMDMA */ + default: + BUG(); } } @@ -1491,27 +1473,27 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -/** - * ata_sff_host_intr - Handle host interrupt for given (port, task) - * @ap: Port on which interrupt arrived (possibly...) - * @qc: Taskfile currently active in engine - * - * Handle host interrupt for given queued command. Currently, - * only DMA interrupts are handled. All other commands are - * handled via polling with interrupts disabled (nIEN bit). 
- * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * One if interrupt was handled, zero if not (shared irq). - */ -unsigned int ata_sff_host_intr(struct ata_port *ap, - struct ata_queued_cmd *qc) +static unsigned int ata_sff_idle_irq(struct ata_port *ap) { - struct ata_eh_info *ehi = &ap->link.eh_info; - u8 status, host_stat = 0; - bool bmdma_stopped = false; + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + ap->ops->sff_check_status(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); + ata_port_printk(ap, KERN_WARNING, "irq trap\n"); + return 1; + } +#endif + return 0; /* irq not handled */ +} + +static unsigned int __ata_sff_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc, + bool hsmv_on_idle) +{ + u8 status; VPRINTK("ata%u: protocol %d task_state %d\n", ap->print_id, qc->tf.protocol, ap->hsm_task_state); @@ -1528,90 +1510,56 @@ unsigned int ata_sff_host_intr(struct ata_port *ap, * need to check ata_is_atapi(qc->tf.protocol) again. */ if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) - goto idle_irq; - break; - case HSM_ST_LAST: - if (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA) { - /* check status of DMA engine */ - host_stat = ap->ops->bmdma_status(ap); - VPRINTK("ata%u: host_stat 0x%X\n", - ap->print_id, host_stat); - - /* if it's not our irq... */ - if (!(host_stat & ATA_DMA_INTR)) - goto idle_irq; - - /* before we do anything else, clear DMA-Start bit */ - ap->ops->bmdma_stop(qc); - bmdma_stopped = true; - - if (unlikely(host_stat & ATA_DMA_ERR)) { - /* error when transfering data to/from memory */ - qc->err_mask |= AC_ERR_HOST_BUS; - ap->hsm_task_state = HSM_ST_ERR; - } - } + return ata_sff_idle_irq(ap); break; case HSM_ST: + case HSM_ST_LAST: break; default: - goto idle_irq; + return ata_sff_idle_irq(ap); } - /* check main status, clearing INTRQ if needed */ status = ata_sff_irq_status(ap); if (status & ATA_BUSY) { - if (bmdma_stopped) { + if (hsmv_on_idle) { /* BMDMA engine is already stopped, we're screwed */ qc->err_mask |= AC_ERR_HSM; ap->hsm_task_state = HSM_ST_ERR; } else - goto idle_irq; + return ata_sff_idle_irq(ap); } /* clear irq events */ - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_hsm_move(ap, qc, status, 0); - if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || - qc->tf.protocol == ATAPI_PROT_DMA)) - ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); - return 1; /* irq handled */ - -idle_irq: - ap->stats.idle_irq++; - -#ifdef ATA_IRQ_TRAP - if ((ap->stats.idle_irq % 1000) == 0) { - ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); - ata_port_printk(ap, KERN_WARNING, "irq trap\n"); - return 1; - } -#endif - return 0; /* irq not handled */ } -EXPORT_SYMBOL_GPL(ata_sff_host_intr); /** - * ata_sff_interrupt - Default ATA host interrupt handler - * @irq: irq line (unused) - * @dev_instance: pointer to our ata_host information structure + * ata_sff_port_intr - Handle SFF port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine * - * Default interrupt handler for PCI IDE devices. Calls - * ata_sff_host_intr() for each port that is not disabled. + * Handle port interrupt for given queued command. * * LOCKING: - * Obtains host lock during operation. + * spin_lock_irqsave(host lock) * * RETURNS: - * IRQ_NONE or IRQ_HANDLED. + * One if interrupt was handled, zero if not (shared irq). 
*/ -irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + return __ata_sff_port_intr(ap, qc, false); +} +EXPORT_SYMBOL_GPL(ata_sff_port_intr); + +static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, + unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) { struct ata_host *host = dev_instance; bool retried = false; @@ -1631,7 +1579,7 @@ retry: qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) { if (!(qc->tf.flags & ATA_TFLAG_POLLING)) - handled |= ata_sff_host_intr(ap, qc); + handled |= port_intr(ap, qc); else polling |= 1 << i; } else @@ -1658,7 +1606,8 @@ retry: if (idle & (1 << i)) { ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } else { /* clear INTRQ and check if BUSY cleared */ if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) @@ -1680,6 +1629,25 @@ retry: return IRQ_RETVAL(handled); } + +/** + * ata_sff_interrupt - Default SFF ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_sff_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); +} EXPORT_SYMBOL_GPL(ata_sff_interrupt); /** @@ -1717,7 +1685,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap) status); /* Run the host interrupt logic as if the interrupt had not been lost */ - ata_sff_host_intr(ap, qc); + ata_sff_port_intr(ap, qc); } EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); @@ -1744,7 +1712,8 @@ void ata_sff_freeze(struct ata_port *ap) */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } EXPORT_SYMBOL_GPL(ata_sff_freeze); @@ -1761,7 +1730,8 @@ void ata_sff_thaw(struct ata_port *ap) { /* clear & re-enable interrupts */ ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); ata_sff_irq_on(ap); } EXPORT_SYMBOL_GPL(ata_sff_thaw); @@ -2349,13 +2319,13 @@ int ata_pci_sff_init_host(struct ata_host *host) EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** - * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host + * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host * @pdev: target PCI device * @ppi: array of port_info, must be enough for two ports * @r_host: out argument for the initialized ATA host * - * Helper to allocate ATA host for @pdev, acquire all native PCI - * resources and initialize it accordingly in one go. + * Helper to allocate PIO-only SFF ATA host for @pdev, acquire + * all PCI resources and initialize it accordingly in one go. * * LOCKING: * Inherited from calling layer (may sleep). 
@@ -2385,9 +2355,6 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, if (rc) goto err_out; - /* init DMA related stuff */ - ata_pci_bmdma_init(host); - devres_remove_group(&pdev->dev, NULL); *r_host = host; return 0; @@ -2492,8 +2459,21 @@ out: } EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); +static const struct ata_port_info *ata_sff_find_valid_pi( + const struct ata_port_info * const *ppi) +{ + int i; + + /* look up the first valid port_info */ + for (i = 0; i < 2 && ppi[i]; i++) + if (ppi[i]->port_ops != &ata_dummy_port_ops) + return ppi[i]; + + return NULL; +} + /** - * ata_pci_sff_init_one - Initialize/register PCI IDE host controller + * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller * @pdev: Controller to be initialized * @ppi: array of port_info, must be enough for two ports * @sht: scsi_host_template to use when registering the host @@ -2502,11 +2482,7 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); * * This is a helper function which can be called from a driver's * xxx_init_one() probe function if the hardware uses traditional - * IDE taskfile registers. - * - * This function calls pci_enable_device(), reserves its register - * regions, sets the dma mask, enables bus master mode, and calls - * ata_device_add() + * IDE taskfile registers and is PIO only. * * ASSUMPTION: * Nobody makes a single channel controller that appears solely as @@ -2523,20 +2499,13 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, struct scsi_host_template *sht, void *host_priv, int hflag) { struct device *dev = &pdev->dev; - const struct ata_port_info *pi = NULL; + const struct ata_port_info *pi; struct ata_host *host = NULL; - int i, rc; + int rc; DPRINTK("ENTER\n"); - /* look up the first valid port_info */ - for (i = 0; i < 2 && ppi[i]; i++) { - if (ppi[i]->port_ops != &ata_dummy_port_ops) { - pi = ppi[i]; - break; - } - } - + pi = ata_sff_find_valid_pi(ppi); if (!pi) { dev_printk(KERN_ERR, &pdev->dev, "no valid port_info specified\n"); @@ -2557,7 +2526,6 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, host->private_data = host_priv; host->flags |= hflag; - pci_set_master(pdev); rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); out: if (rc == 0) @@ -2571,6 +2539,12 @@ EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); #endif /* CONFIG_PCI */ +/* + * BMDMA support + */ + +#ifdef CONFIG_ATA_BMDMA + const struct ata_port_operations ata_bmdma_port_ops = { .inherits = &ata_sff_port_ops, @@ -2580,6 +2554,7 @@ const struct ata_port_operations ata_bmdma_port_ops = { .qc_prep = ata_bmdma_qc_prep, .qc_issue = ata_bmdma_qc_issue, + .sff_irq_clear = ata_bmdma_irq_clear, .bmdma_setup = ata_bmdma_setup, .bmdma_start = ata_bmdma_start, .bmdma_stop = ata_bmdma_stop, @@ -2803,6 +2778,75 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); +/** + * ata_bmdma_port_intr - Handle BMDMA port interrupt + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle port interrupt for given queued command. + * + * LOCKING: + * spin_lock_irqsave(host lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). 
+ */ +unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) +{ + struct ata_eh_info *ehi = &ap->link.eh_info; + u8 host_stat = 0; + bool bmdma_stopped = false; + unsigned int handled; + + if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { + /* check status of DMA engine */ + host_stat = ap->ops->bmdma_status(ap); + VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); + + /* if it's not our irq... */ + if (!(host_stat & ATA_DMA_INTR)) + return ata_sff_idle_irq(ap); + + /* before we do anything else, clear DMA-Start bit */ + ap->ops->bmdma_stop(qc); + bmdma_stopped = true; + + if (unlikely(host_stat & ATA_DMA_ERR)) { + /* error when transfering data to/from memory */ + qc->err_mask |= AC_ERR_HOST_BUS; + ap->hsm_task_state = HSM_ST_ERR; + } + } + + handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); + + if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) + ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); + + return handled; +} +EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); + +/** + * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler + * @irq: irq line (unused) + * @dev_instance: pointer to our ata_host information structure + * + * Default interrupt handler for PCI IDE devices. Calls + * ata_bmdma_port_intr() for each port that is not disabled. + * + * LOCKING: + * Obtains host lock during operation. + * + * RETURNS: + * IRQ_NONE or IRQ_HANDLED. + */ +irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) +{ + return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); +} +EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); + /** * ata_bmdma_error_handler - Stock error handler for BMDMA controller * @ap: port to handle error for @@ -2848,7 +2892,8 @@ void ata_bmdma_error_handler(struct ata_port *ap) /* if we're gonna thaw, make sure IRQ is clear */ if (thaw) { ap->ops->sff_check_status(ap); - ap->ops->sff_irq_clear(ap); + if (ap->ops->sff_irq_clear) + ap->ops->sff_irq_clear(ap); } } @@ -2881,6 +2926,28 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) } EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); +/** + * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. + * @ap: Port associated with this ATA transaction. + * + * Clear interrupt and error flags in DMA status register. + * + * May be used as the irq_clear() entry in ata_port_operations. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +void ata_bmdma_irq_clear(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + if (!mmio) + return; + + iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); +} +EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); + /** * ata_bmdma_setup - Set up PCI IDE BMDMA transaction * @qc: Info associated with this ATA transaction. @@ -3137,7 +3204,100 @@ void ata_pci_bmdma_init(struct ata_host *host) } EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); +/** + * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host + * @pdev: target PCI device + * @ppi: array of port_info, must be enough for two ports + * @r_host: out argument for the initialized ATA host + * + * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI + * resources and initialize it accordingly in one go. + * + * LOCKING: + * Inherited from calling layer (may sleep). + * + * RETURNS: + * 0 on success, -errno otherwise. 
+ */ +int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host) +{ + int rc; + + rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); + if (rc) + return rc; + + ata_pci_bmdma_init(*r_host); + return 0; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); + +/** + * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller + * @pdev: Controller to be initialized + * @ppi: array of port_info, must be enough for two ports + * @sht: scsi_host_template to use when registering the host + * @host_priv: host private_data + * @hflags: host flags + * + * This function is similar to ata_pci_sff_init_one() but also + * takes care of BMDMA initialization. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + * + * RETURNS: + * Zero on success, negative on errno-based value on error. + */ +int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, void *host_priv, + int hflags) +{ + struct device *dev = &pdev->dev; + const struct ata_port_info *pi; + struct ata_host *host = NULL; + int rc; + + DPRINTK("ENTER\n"); + + pi = ata_sff_find_valid_pi(ppi); + if (!pi) { + dev_printk(KERN_ERR, &pdev->dev, + "no valid port_info specified\n"); + return -EINVAL; + } + + if (!devres_open_group(dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + rc = pcim_enable_device(pdev); + if (rc) + goto out; + + /* prepare and activate BMDMA host */ + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); + if (rc) + goto out; + host->private_data = host_priv; + host->flags |= hflags; + + pci_set_master(pdev); + rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); + out: + if (rc == 0) + devres_remove_group(&pdev->dev, NULL); + else + devres_release_group(&pdev->dev, NULL); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); + #endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ /** * ata_sff_port_init - Initialize SFF/BMDMA ATA port diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c index 066b9f301ed5..c8d47034d5e9 100644 --- a/drivers/ata/pata_acpi.c +++ b/drivers/ata/pata_acpi.c @@ -260,7 +260,7 @@ static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id) return rc; pcim_pin_device(pdev); } - return ata_pci_sff_init_one(pdev, ppi, &pacpi_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &pacpi_sht, NULL, 0); } static const struct pci_device_id pacpi_pci_tbl[] = { diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index f306e10c748d..794ec6e3275d 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -583,7 +583,10 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[0] = &info_20_udma; } - return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + if (!ppi[0]->mwdma_mask && !ppi[0]->udma_mask) + return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL, 0); + else + return ata_pci_bmdma_init_one(pdev, ppi, &ali_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index d95eca9c547e..620a07cabe31 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -574,7 +574,7 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* And fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &amd_sht, hpriv, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &amd_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c index 4d066d6c30fa..ba43f0f8c880 100644 --- 
a/drivers/ata/pata_artop.c +++ b/drivers/ata/pata_artop.c @@ -421,7 +421,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) BUG_ON(ppi[0] == NULL); - return ata_pci_sff_init_one(pdev, ppi, &artop_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); } static const struct pci_device_id artop_pci_tbl[] = { diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 44d88b380ddd..43755616dc5a 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -246,8 +246,8 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i])) ppi[i] = &ata_dummy_port_info; - return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id atiixp[] = { diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c index bb6e0746e07d..95295935dd95 100644 --- a/drivers/ata/pata_atp867x.c +++ b/drivers/ata/pata_atp867x.c @@ -525,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev, pci_set_master(pdev); - rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt, + rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &atp867x_sht); if (rc) dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n"); diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 6422cfd13d0d..9cae65de750e 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -1214,7 +1214,7 @@ static unsigned int bfin_data_xfer(struct ata_device *dev, unsigned char *buf, * bfin_irq_clear - Clear ATAPI interrupt. * @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). 
*/ static void bfin_irq_clear(struct ata_port *ap) diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 4c81a71b8877..9f5da1c7454b 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -367,7 +367,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_write_config_byte(pdev, UDIDETCR0, 0xF0); #endif - return ata_pci_sff_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c index 17c5f346ff01..030952f1f97c 100644 --- a/drivers/ata/pata_cs5520.c +++ b/drivers/ata/pata_cs5520.c @@ -221,7 +221,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi continue; rc = devm_request_irq(&pdev->dev, irq[ap->port_no], - ata_sff_interrupt, 0, DRV_NAME, host); + ata_bmdma_interrupt, 0, DRV_NAME, host); if (rc) return rc; diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c index e809a4233a81..f792330f0d8e 100644 --- a/drivers/ata/pata_cs5530.c +++ b/drivers/ata/pata_cs5530.c @@ -324,7 +324,7 @@ static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ppi[1] = &info_palmax_secondary; /* Now kick off ATA set up */ - return ata_pci_sff_init_one(pdev, ppi, &cs5530_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c index a02e6459fdcc..03a93186aa19 100644 --- a/drivers/ata/pata_cs5535.c +++ b/drivers/ata/pata_cs5535.c @@ -198,7 +198,7 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id) rdmsr(ATAC_CH0D1_PIO, timings, dummy); if (CS5535_BAD_PIO(timings)) wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0); - return ata_pci_sff_init_one(dev, ppi, &cs5535_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0); } static const struct pci_device_id cs5535[] = { diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index 914ae3506ff5..21ee23f89e88 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -260,7 +260,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id) return -ENODEV; } - return ata_pci_sff_init_one(dev, ppi, &cs5536_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &cs5536_sht, NULL, 0); } static const struct pci_device_id cs5536[] = { diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c index 0fcc096b8dac..6d915b063d93 100644 --- a/drivers/ata/pata_cypress.c +++ b/drivers/ata/pata_cypress.c @@ -138,7 +138,7 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i if (PCI_FUNC(pdev->devfn) != 1) return -ENODEV; - return ata_pci_sff_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &cy82c693_sht, NULL, 0); } static const struct pci_device_id cy82c693[] = { diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c index 3bac0e079691..a08834758ea2 100644 --- a/drivers/ata/pata_efar.c +++ b/drivers/ata/pata_efar.c @@ -277,8 +277,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &efar_sht, NULL, - ATA_HOST_PARALLEL_SCAN); + return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL, + ATA_HOST_PARALLEL_SCAN); } static const struct pci_device_id efar_pci_tbl[] = { diff --git 
a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 8580eb3cd54d..7688868557b9 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -361,7 +361,7 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id) break; } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt36x_sht, hpriv, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 98b498b6907c..9ae4c0830577 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -987,7 +987,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) } /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt37x_sht, private_data, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt37x_sht, private_data, 0); } static const struct pci_device_id hpt37x[] = { diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c index 8b95aeba0e74..32f3463216b8 100644 --- a/drivers/ata/pata_hpt3x2n.c +++ b/drivers/ata/pata_hpt3x2n.c @@ -548,7 +548,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c); /* Now kick off ATA set up */ - return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); + return ata_pci_bmdma_init_one(dev, ppi, &hpt3x2n_sht, hpriv, 0); } static const struct pci_device_id hpt3x2n[] = { diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index 727a81ce4c9f..b63d5e2d4628 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -248,7 +248,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd"); } pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &hpt3x3_sht); } diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c index b56e8f722d20..9f2889fe43b2 100644 --- a/drivers/ata/pata_icside.c +++ b/drivers/ata/pata_icside.c @@ -470,7 +470,7 @@ static int __devinit pata_icside_add_ports(struct pata_icside_info *info) pata_icside_setup_ioaddr(ap, info->base, info, info->port[i]); } - return ata_host_activate(host, ec->irq, ata_sff_interrupt, 0, + return ata_host_activate(host, ec->irq, ata_bmdma_interrupt, 0, &pata_icside_sht); } diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c index f971f0de88e6..4d142a2ab8fd 100644 --- a/drivers/ata/pata_it8213.c +++ b/drivers/ata/pata_it8213.c @@ -273,7 +273,7 @@ static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *en dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &it8213_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0); } static const struct pci_device_id it8213_pci_tbl[] = { diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c index 2bd2b002d14a..bf88f71a21f4 100644 --- a/drivers/ata/pata_it821x.c +++ b/drivers/ata/pata_it821x.c @@ -933,7 +933,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id) else ppi[0] = &info_smart; } - return ata_pci_sff_init_one(pdev, ppi, &it821x_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &it821x_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 565e01e6ac7c..cb3babbb7035 100644 --- 
a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c @@ -144,7 +144,7 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(pdev, ppi, &jmicron_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); } static const struct pci_device_id jmicron_pci_tbl[] = { diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index b5b48e703cb7..76640ac76888 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -1110,7 +1110,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv, /* Start it up */ priv->irq = irq; - return ata_host_activate(priv->host, irq, ata_sff_interrupt, 0, + return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0, &pata_macio_sht); } diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c index e8ca02e5a71d..dd38083dcbeb 100644 --- a/drivers/ata/pata_marvell.c +++ b/drivers/ata/pata_marvell.c @@ -153,7 +153,7 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i return -ENODEV; } #endif - return ata_pci_sff_init_one(pdev, ppi, &marvell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &marvell_sht, NULL, 0); } static const struct pci_device_id marvell_pci_tbl[] = { diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index 36afe2c1c747..f087ab55b1df 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -659,7 +659,7 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv, ata_port_desc(ap, "ata_regs 0x%lx", raw_ata_regs); /* activate host */ - return ata_host_activate(host, priv->ata_irq, ata_sff_interrupt, 0, + return ata_host_activate(host, priv->ata_irq, ata_bmdma_interrupt, 0, &mpc52xx_ata_sht); } diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c index 94f979a7f4f7..3eb921c746a1 100644 --- a/drivers/ata/pata_netcell.c +++ b/drivers/ata/pata_netcell.c @@ -82,7 +82,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e ata_pci_bmdma_clear_simplex(pdev); /* And let the library code do the work */ - return ata_pci_sff_init_one(pdev, port_info, &netcell_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, port_info, &netcell_sht, NULL, 0); } static const struct pci_device_id netcell_pci_tbl[] = { diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index dd53a66b19e3..cc50bd09aa26 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c @@ -149,7 +149,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) ninja32_program(base); /* FIXME: Should we disable them at remove ? 
*/ - return ata_host_activate(host, dev->irq, ata_sff_interrupt, + return ata_host_activate(host, dev->irq, ata_bmdma_interrupt, IRQF_SHARED, &ninja32_sht); } diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index fdbba2d76d3e..605f198f958c 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -380,7 +380,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e ns87415_fixup(pdev); - return ata_pci_sff_init_one(pdev, ppi, &ns87415_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &ns87415_sht, NULL, 0); } static const struct pci_device_id ns87415_pci_tbl[] = { diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 3001109352ea..06ddd91ffeda 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -749,20 +749,6 @@ static void octeon_cf_dev_config(struct ata_device *dev) dev->max_sectors = min(dev->max_sectors, 4095U); } -/* - * Trap if driver tries to do standard bmdma commands. They are not - * supported. - */ -static void unreachable_qc(struct ata_queued_cmd *qc) -{ - BUG(); -} - -static u8 unreachable_port(struct ata_port *ap) -{ - BUG(); -} - /* * We don't do ATAPI DMA so return 0. */ @@ -804,10 +790,6 @@ static struct ata_port_operations octeon_cf_ops = { .sff_dev_select = octeon_cf_dev_select, .sff_irq_on = octeon_cf_irq_on, .sff_irq_clear = octeon_cf_irq_clear, - .bmdma_setup = unreachable_qc, - .bmdma_start = unreachable_qc, - .bmdma_stop = unreachable_qc, - .bmdma_status = unreachable_port, .cable_detect = ata_cable_40wire, .set_piomode = octeon_cf_set_piomode, .set_dmamode = octeon_cf_set_dmamode, diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c index 988ef2627be3..b811c1636204 100644 --- a/drivers/ata/pata_oldpiix.c +++ b/drivers/ata/pata_oldpiix.c @@ -248,7 +248,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0); } static const struct pci_device_id oldpiix_pci_tbl[] = { diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c index 76b7d12b1e8d..0852cd07de08 100644 --- a/drivers/ata/pata_optidma.c +++ b/drivers/ata/pata_optidma.c @@ -429,7 +429,7 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (optiplus_with_udma(dev)) ppi[0] = &info_82c700_udma; - return ata_pci_sff_init_one(dev, ppi, &optidma_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &optidma_sht, NULL, 0); } static const struct pci_device_id optidma[] = { diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 09f1f22c0307..b18351122525 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c @@ -754,7 +754,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de return -EIO; pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &pdc2027x_sht); } diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index fa1e2f3bc0fd..c39f213e1bbc 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -337,7 +337,7 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id return -ENODEV; } } - return ata_pci_sff_init_one(dev, ppi, &pdc202xx_sht, NULL, 
0); + return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0); } static const struct pci_device_id pdc202xx[] = { diff --git a/drivers/ata/pata_piccolo.c b/drivers/ata/pata_piccolo.c index 981615414849..cb01bf9496fe 100644 --- a/drivers/ata/pata_piccolo.c +++ b/drivers/ata/pata_piccolo.c @@ -95,7 +95,7 @@ static int ata_tosh_init_one(struct pci_dev *dev, const struct pci_device_id *id }; const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info }; /* Just one port for the moment */ - return ata_pci_sff_init_one(dev, ppi, &tosh_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &tosh_sht, NULL, 0); } static struct pci_device_id ata_tosh[] = { diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c index a5fa388e5398..8574b31f1773 100644 --- a/drivers/ata/pata_radisys.c +++ b/drivers/ata/pata_radisys.c @@ -227,7 +227,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &radisys_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0); } static const struct pci_device_id radisys_pci_tbl[] = { diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c index 37092cfd7bc6..5fbe9b166c69 100644 --- a/drivers/ata/pata_rdc.c +++ b/drivers/ata/pata_rdc.c @@ -344,7 +344,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, */ pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg); - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; host->private_data = hpriv; @@ -354,7 +354,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht); } static void rdc_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index 6b5b63a2fd8e..e2c18257adff 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -237,7 +237,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, NULL }; - return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sc1200_sht, NULL, 0); } static const struct pci_device_id sc1200[] = { diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index 6f6193b707cb..d9db3f8d60ef 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -875,7 +875,7 @@ static void scc_postreset(struct ata_link *link, unsigned int *classes) * scc_irq_clear - Clear PCI IDE BMDMA interrupt. * @ap: Port associated with this ATA transaction. * - * Note: Original code is ata_sff_irq_clear(). + * Note: Original code is ata_bmdma_irq_clear(). 
*/ static void scc_irq_clear (struct ata_port *ap) @@ -1105,7 +1105,7 @@ static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &scc_sht); } diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c index 86b3d0133c7c..e97b32f03a6e 100644 --- a/drivers/ata/pata_sch.c +++ b/drivers/ata/pata_sch.c @@ -179,7 +179,7 @@ static int __devinit sch_init_one(struct pci_dev *pdev, dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(pdev, ppi, &sch_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0); } static int __init sch_init(void) diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 43ea389df2b3..86dd714e3e1d 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c @@ -460,7 +460,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ata_pci_bmdma_clear_simplex(pdev); - return ata_pci_sff_init_one(pdev, ppi, &serverworks_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 43faf106f647..d3190d7ec304 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -374,11 +374,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev, ata_sff_std_ports(&host->ports[1]->ioaddr); /* Register & activate */ - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &sil680_sht); use_ioports: - return ata_pci_sff_init_one(pdev, ppi, &sil680_sht, NULL, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sil680_sht, NULL, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c index b6708032f321..60cea13cccce 100644 --- a/drivers/ata/pata_sis.c +++ b/drivers/ata/pata_sis.c @@ -826,7 +826,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) sis_fixup(pdev, chipset); - return ata_pci_sff_init_one(pdev, ppi, &sis_sht, chipset, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &sis_sht, chipset, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 733b042a7469..98548f640c8e 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -316,7 +316,7 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; pci_write_config_dword(dev, 0x40, val); - return ata_pci_sff_init_one(dev, ppi, &sl82c105_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0); } static const struct pci_device_id sl82c105[] = { diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c index 48f50600ed2a..0d1f89e571dd 100644 --- a/drivers/ata/pata_triflex.c +++ b/drivers/ata/pata_triflex.c @@ -201,7 +201,7 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id) if (!printed_version++) dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n"); - return ata_pci_sff_init_one(dev, ppi, &triflex_sht, NULL, 0); + return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0); } static const struct pci_device_id triflex[] = { diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c 
index 7e3e0a5598b7..5e659885de16 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -627,7 +627,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } /* We have established the device type, now fire it up */ - return ata_pci_sff_init_one(pdev, ppi, &via_sht, (void *)config, 0); + return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); } #ifdef CONFIG_PM diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index f3471bc949d3..a476cd99b95d 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -675,8 +675,6 @@ static struct ata_port_operations mv5_ops = { .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, .hardreset = mv_hardreset, - .error_handler = ata_std_error_handler, /* avoid SFF EH */ - .post_internal_cmd = ATA_OP_NULL, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, @@ -2813,7 +2811,7 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause) } else if (!edma_was_enabled) { struct ata_queued_cmd *qc = mv_get_active_qc(ap); if (qc) - ata_sff_host_intr(ap, qc); + ata_bmdma_port_intr(ap, qc); else mv_unexpected_intr(ap, edma_was_enabled); } diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index baa8f0d2c86f..6fd114784116 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -920,7 +920,7 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat) } /* handle interrupt */ - return ata_sff_host_intr(ap, qc); + return ata_bmdma_port_intr(ap, qc); } static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) @@ -1100,7 +1100,7 @@ static void nv_adma_irq_clear(struct ata_port *ap) u32 notifier_clears[2]; if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) { - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); return; } @@ -1505,7 +1505,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance) qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { - handled += ata_sff_host_intr(ap, qc); + handled += ata_bmdma_port_intr(ap, qc); } else { /* * No request pending? 
Clear interrupt status @@ -2430,7 +2430,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ppi[0] = &nv_port_info[type]; ipriv = ppi[0]->private_data; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index d533b3d20ca1..daeebf19a6a9 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -120,8 +120,6 @@ static void qs_host_stop(struct ata_host *host); static void qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); -static void qs_bmdma_stop(struct ata_queued_cmd *qc); -static u8 qs_bmdma_status(struct ata_port *ap); static void qs_freeze(struct ata_port *ap); static void qs_thaw(struct ata_port *ap); static int qs_prereset(struct ata_link *link, unsigned long deadline); @@ -137,8 +135,6 @@ static struct ata_port_operations qs_ata_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = qs_check_atapi_dma, - .bmdma_stop = qs_bmdma_stop, - .bmdma_status = qs_bmdma_status, .qc_prep = qs_qc_prep, .qc_issue = qs_qc_issue, @@ -190,16 +186,6 @@ static int qs_check_atapi_dma(struct ata_queued_cmd *qc) return 1; /* ATAPI DMA not supported */ } -static void qs_bmdma_stop(struct ata_queued_cmd *qc) -{ - /* nothing */ -} - -static u8 qs_bmdma_status(struct ata_port *ap) -{ - return 0; -} - static inline void qs_enter_reg_mode(struct ata_port *ap) { u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000); @@ -454,7 +440,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host *host) if (!pp || pp->state != qs_state_mmio) continue; if (!(qc->tf.flags & ATA_TFLAG_POLLING)) - handled |= ata_sff_host_intr(ap, qc); + handled |= ata_sff_port_intr(ap, qc); } return handled; } diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 2dda312b6b9a..3a4f84219719 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -503,7 +503,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) goto err_hsm; /* ack bmdma irq events */ - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); /* kick HSM in the ass */ ata_sff_hsm_move(ap, qc, status, 0); @@ -584,7 +584,7 @@ static void sil_thaw(struct ata_port *ap) /* clear IRQ */ ap->ops->sff_check_status(ap); - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); /* turn on SATA IRQ if supported */ if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ)) diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index f8a91bfd66a8..2bfe3ae03976 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c @@ -279,7 +279,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; @@ -308,7 +308,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_intx(pdev, 1); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &sis_sht); } diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 101fd6a19829..7d9db4aaf07e 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c @@ -502,7 +502,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); pci_set_master(pdev); - return 
ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &k2_sata_sht); } diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index d8dac17dc2c8..b8578c32d344 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c @@ -242,7 +242,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); pci_intx(pdev, 1); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &uli_sht); } diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 08f65492cc81..101d8c219caf 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -308,7 +308,7 @@ static void svia_noop_freeze(struct ata_port *ap) * certain way. Leave it alone and just clear pending IRQ. */ ap->ops->sff_check_status(ap); - ata_sff_irq_clear(ap); + ata_bmdma_irq_clear(ap); } /** @@ -463,7 +463,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) struct ata_host *host; int rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; *r_host = host; @@ -520,7 +520,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) struct ata_host *host; int i, rc; - rc = ata_pci_sff_prepare_host(pdev, ppi, &host); + rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); if (rc) return rc; *r_host = host; @@ -628,7 +628,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) svia_configure(pdev); pci_set_master(pdev); - return ata_host_activate(host, pdev->irq, ata_sff_interrupt, + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &svia_sht); } diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c index 2107952ebff1..b777176ff494 100644 --- a/drivers/ata/sata_vsc.c +++ b/drivers/ata/sata_vsc.c @@ -245,7 +245,7 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap) qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING))) - handled = ata_sff_host_intr(ap, qc); + handled = ata_bmdma_port_intr(ap, qc); /* We received an interrupt during a polled command, * or some other spurious condition. Interrupt reporting diff --git a/drivers/base/topology.c b/drivers/base/topology.c index bf6b13206d00..9fc630ce1ddb 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -162,7 +162,7 @@ static int __cpuinit topology_cpu_callback(struct notifier_block *nfb, topology_remove_dev(cpu); break; } - return rc ? NOTIFY_BAD : NOTIFY_OK; + return notifier_from_errno(rc); } static int __cpuinit topology_sysfs_init(void) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index e21175be25d0..f09fc0e2062d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -1121,5 +1121,12 @@ config DEVPORT source "drivers/s390/char/Kconfig" +config RAMOOPS + tristate "Log panic/oops to a RAM buffer" + default n + help + This enables panic and oops messages to be logged to a circular + buffer in RAM where it can be read back at some later point. 
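The records this option produces are plain text written at a fixed physical address, so after the next boot they can be recovered from user space without extra kernel support. The snippet below is an illustrative sketch only, not part of the patch: the address and size are made-up examples and must match the mem_address/mem_size parameters handed to the driver (added later in this patch), the 4096-byte record size mirrors the driver's RECORD_SIZE, and reading /dev/mem for that range has to be permitted on the running kernel.

	/*
	 * Sketch: dump ramoops records from the reserved region after a reboot.
	 * MEM_ADDRESS and MEM_SIZE are assumptions -- substitute the values that
	 * were passed to the ramoops module on the previous boot.
	 */
	#define _FILE_OFFSET_BITS 64
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>

	#define MEM_ADDRESS 0x8000000ULL	/* example start of the reserved RAM */
	#define MEM_SIZE    0x10000ULL		/* example size of the reserved RAM */
	#define RECORD_SIZE 4096		/* matches RECORD_SIZE in ramoops.c */

	int main(void)
	{
		char buf[RECORD_SIZE + 1];
		unsigned long long off;
		int fd = open("/dev/mem", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/mem");
			return 1;
		}
		for (off = 0; off + RECORD_SIZE <= MEM_SIZE; off += RECORD_SIZE) {
			if (pread(fd, buf, RECORD_SIZE, MEM_ADDRESS + off) != RECORD_SIZE)
				break;
			buf[RECORD_SIZE] = '\0';
			/* Each used slot starts with the "====" marker and a timestamp. */
			if (!strncmp(buf, "====", 4))
				printf("--- record at offset %#llx ---\n%s\n", off, buf);
		}
		close(fd);
		return 0;
	}

The "====" marker is RAMOOPS_KERNMSG_HDR from the driver itself; a slot that does not begin with it is either unused or unrelated memory.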
+ endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index d39be4cf1f5d..88d6eac69754 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -108,6 +108,7 @@ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o obj-$(CONFIG_TCG_TPM) += tpm/ obj-$(CONFIG_PS3_FLASH) += ps3flash.o +obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 67ea3a60de74..70312da4c968 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -384,7 +384,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) { u32 httfea,baseaddr,enuscr; struct pci_dev *dev1; - int i; + int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up ULi AGP\n"); @@ -400,15 +400,18 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) if (i == ARRAY_SIZE(uli_sizes)) { dev_info(&pdev->dev, "no ULi size found for %d\n", size); - return -ENODEV; + ret = -ENODEV; + goto put; } /* shadow x86-64 registers into ULi registers */ pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ - if ((httfea & 0x7fff) >> (32 - 25)) - return -ENODEV; + if ((httfea & 0x7fff) >> (32 - 25)) { + ret = -ENODEV; + goto put; + } httfea = (httfea& 0x7fff) << 25; @@ -420,9 +423,10 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) enuscr= httfea+ (size * 1024 * 1024) - 1; pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea); pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr); - + ret = 0; +put: pci_dev_put(dev1); - return 0; + return ret; } @@ -441,7 +445,7 @@ static int nforce3_agp_init(struct pci_dev *pdev) { u32 tmp, apbase, apbar, aplimit; struct pci_dev *dev1; - int i; + int i, ret; unsigned size = amd64_fetch_size(); dev_info(&pdev->dev, "setting up Nforce3 AGP\n"); @@ -458,7 +462,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) if (i == ARRAY_SIZE(nforce3_sizes)) { dev_info(&pdev->dev, "no NForce3 size found for %d\n", size); - return -ENODEV; + ret = -ENODEV; + goto put; } pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp); @@ -472,7 +477,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { dev_info(&pdev->dev, "aperture base > 4G\n"); - return -ENODEV; + ret = -ENODEV; + goto put; } apbase = (apbase & 0x7fff) << 25; @@ -488,9 +494,11 @@ static int nforce3_agp_init(struct pci_dev *pdev) pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit); + ret = 0; +put: pci_dev_put(dev1); - return 0; + return ret; } static int __devinit agp_amd64_probe(struct pci_dev *pdev, diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index 56b27671adc4..4f8d60c25a98 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -84,6 +84,7 @@ static char *serial_version = "4.30"; #include #include #include +#include #include @@ -1954,29 +1955,16 @@ static const struct tty_operations serial_ops = { /* * The serial driver boot-time initialization code! 
*/ -static int __init rs_init(void) +static int __init amiga_serial_probe(struct platform_device *pdev) { unsigned long flags; struct serial_state * state; int error; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_SERIAL)) - return -ENODEV; - serial_driver = alloc_tty_driver(1); if (!serial_driver) return -ENOMEM; - /* - * We request SERDAT and SERPER only, because the serial registers are - * too spreaded over the custom register space - */ - if (!request_mem_region(CUSTOM_PHYSADDR+0x30, 4, - "amiserial [Paula]")) { - error = -EBUSY; - goto fail_put_tty_driver; - } - IRQ_ports = NULL; show_serial_version(); @@ -1998,7 +1986,7 @@ static int __init rs_init(void) error = tty_register_driver(serial_driver); if (error) - goto fail_release_mem_region; + goto fail_put_tty_driver; state = rs_table; state->magic = SSTATE_MAGIC; @@ -2050,23 +2038,24 @@ static int __init rs_init(void) ciab.ddra |= (SER_DTR | SER_RTS); /* outputs */ ciab.ddra &= ~(SER_DCD | SER_CTS | SER_DSR); /* inputs */ + platform_set_drvdata(pdev, state); + return 0; fail_free_irq: free_irq(IRQ_AMIGA_TBE, state); fail_unregister: tty_unregister_driver(serial_driver); -fail_release_mem_region: - release_mem_region(CUSTOM_PHYSADDR+0x30, 4); fail_put_tty_driver: put_tty_driver(serial_driver); return error; } -static __exit void rs_exit(void) +static int __exit amiga_serial_remove(struct platform_device *pdev) { int error; - struct async_struct *info = rs_table[0].info; + struct serial_state *state = platform_get_drvdata(pdev); + struct async_struct *info = state->info; /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ tasklet_kill(&info->tlet); @@ -2075,19 +2064,38 @@ static __exit void rs_exit(void) error); put_tty_driver(serial_driver); - if (info) { - rs_table[0].info = NULL; - kfree(info); - } + rs_table[0].info = NULL; + kfree(info); free_irq(IRQ_AMIGA_TBE, rs_table); free_irq(IRQ_AMIGA_RBF, rs_table); - release_mem_region(CUSTOM_PHYSADDR+0x30, 4); + platform_set_drvdata(pdev, NULL); + + return error; +} + +static struct platform_driver amiga_serial_driver = { + .remove = __exit_p(amiga_serial_remove), + .driver = { + .name = "amiga-serial", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_serial_init(void) +{ + return platform_driver_probe(&amiga_serial_driver, amiga_serial_probe); +} + +module_init(amiga_serial_init); + +static void __exit amiga_serial_exit(void) +{ + platform_driver_unregister(&amiga_serial_driver); } -module_init(rs_init) -module_exit(rs_exit) +module_exit(amiga_serial_exit); #if defined(CONFIG_SERIAL_CONSOLE) && !defined(MODULE) @@ -2154,3 +2162,4 @@ console_initcall(amiserial_console_init); #endif /* CONFIG_SERIAL_CONSOLE && !MODULE */ MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-serial"); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 63313a33ba5f..f4ae0e0fb631 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -703,14 +703,9 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) /* In general, the device is only openable by root anyway, so we're not particularly concerned that bogus ioctls can flood the console. 
*/ - adgl = kmalloc(sizeof(struct st_ram_io), GFP_KERNEL); - if (!adgl) - return -ENOMEM; - - if (copy_from_user(adgl, argp, sizeof(struct st_ram_io))) { - kfree(adgl); - return -EFAULT; - } + adgl = memdup_user(argp, sizeof(struct st_ram_io)); + if (IS_ERR(adgl)) + return PTR_ERR(adgl); lock_kernel(); IndexCard = adgl->num_card-1; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c6ad4234378d..4f3f8c9ec262 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2505,12 +2505,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, return rv; } - printk(KERN_INFO - "ipmi: Found new BMC (man_id: 0x%6.6x, " - " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", - bmc->id.manufacturer_id, - bmc->id.product_id, - bmc->id.device_id); + dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " + "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", + bmc->id.manufacturer_id, + bmc->id.product_id, + bmc->id.device_id); } /* @@ -4037,8 +4036,8 @@ static void ipmi_request_event(void) static struct timer_list ipmi_timer; -/* Call every ~100 ms. */ -#define IPMI_TIMEOUT_TIME 100 +/* Call every ~1000 ms. */ +#define IPMI_TIMEOUT_TIME 1000 /* How many jiffies does it take to get to the timeout time. */ #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 47ffe4a90a95..35603dd4e6c5 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -107,6 +107,14 @@ enum si_type { }; static char *si_to_str[] = { "kcs", "smic", "bt" }; +enum ipmi_addr_src { + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, + SI_PCI, SI_DEVICETREE, SI_DEFAULT +}; +static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", + "ACPI", "SMBIOS", "PCI", + "device-tree", "default" }; + #define DEVICE_NAME "ipmi_si" static struct platform_driver ipmi_driver = { @@ -188,7 +196,7 @@ struct smi_info { int (*irq_setup)(struct smi_info *info); void (*irq_cleanup)(struct smi_info *info); unsigned int io_size; - char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ + enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ void (*addr_source_cleanup)(struct smi_info *info); void *addr_source_data; @@ -300,6 +308,7 @@ static int num_max_busy_us; static int unload_when_empty = 1; +static int add_smi(struct smi_info *smi); static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *to_clean); @@ -314,9 +323,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, { /* Deliver the message to the upper layer with the lock released. 
*/ - spin_unlock(&(smi_info->si_lock)); - ipmi_smi_msg_received(smi_info->intf, msg); - spin_lock(&(smi_info->si_lock)); + + if (smi_info->run_to_completion) { + ipmi_smi_msg_received(smi_info->intf, msg); + } else { + spin_unlock(&(smi_info->si_lock)); + ipmi_smi_msg_received(smi_info->intf, msg); + spin_lock(&(smi_info->si_lock)); + } } static void return_hosed_msg(struct smi_info *smi_info, int cCode) @@ -445,6 +459,9 @@ static inline void disable_si_irq(struct smi_info *smi_info) if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { start_disable_irq(smi_info); smi_info->interrupt_disabled = 1; + if (!atomic_read(&smi_info->stop_operation)) + mod_timer(&smi_info->si_timer, + jiffies + SI_TIMEOUT_JIFFIES); } } @@ -576,9 +593,8 @@ static void handle_transaction_done(struct smi_info *smi_info) smi_info->handlers->get_result(smi_info->si_sm, msg, 3); if (msg[2] != 0) { /* Error clearing flags */ - printk(KERN_WARNING - "ipmi_si: Error clearing flags: %2.2x\n", - msg[2]); + dev_warn(smi_info->dev, + "Error clearing flags: %2.2x\n", msg[2]); } if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) start_enable_irq(smi_info); @@ -670,9 +686,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not enable interrupts" - ", failed get, using polled mode.\n"); + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed get, using polled mode.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -693,11 +708,11 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not enable interrupts" - ", failed set, using polled mode.\n"); - } + if (msg[2] != 0) + dev_warn(smi_info->dev, "Could not enable interrupts" + ", failed set, using polled mode.\n"); + else + smi_info->interrupt_disabled = 0; smi_info->si_state = SI_NORMAL; break; } @@ -709,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not disable interrupts" - ", failed get.\n"); + dev_warn(smi_info->dev, "Could not disable interrupts" + ", failed get.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -733,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. 
*/ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Could not disable interrupts" - ", failed set.\n"); + dev_warn(smi_info->dev, "Could not disable interrupts" + ", failed set.\n"); } smi_info->si_state = SI_NORMAL; break; @@ -877,6 +890,11 @@ static void sender(void *send_info, printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); #endif + mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); + + if (smi_info->thread) + wake_up_process(smi_info->thread); + if (smi_info->run_to_completion) { /* * If we are running to completion, then throw it in @@ -997,6 +1015,8 @@ static int ipmi_thread(void *data) ; /* do nothing */ else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) schedule(); + else if (smi_result == SI_SM_IDLE) + schedule_timeout_interruptible(100); else schedule_timeout_interruptible(0); } @@ -1039,6 +1059,7 @@ static void smi_timeout(unsigned long data) unsigned long flags; unsigned long jiffies_now; long time_diff; + long timeout; #ifdef DEBUG_TIMING struct timeval t; #endif @@ -1059,9 +1080,9 @@ static void smi_timeout(unsigned long data) if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { /* Running with interrupts, only do long timeouts. */ - smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + timeout = jiffies + SI_TIMEOUT_JIFFIES; smi_inc_stat(smi_info, long_timeouts); - goto do_add_timer; + goto do_mod_timer; } /* @@ -1070,14 +1091,15 @@ static void smi_timeout(unsigned long data) */ if (smi_result == SI_SM_CALL_WITH_DELAY) { smi_inc_stat(smi_info, short_timeouts); - smi_info->si_timer.expires = jiffies + 1; + timeout = jiffies + 1; } else { smi_inc_stat(smi_info, long_timeouts); - smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; + timeout = jiffies + SI_TIMEOUT_JIFFIES; } - do_add_timer: - add_timer(&(smi_info->si_timer)); + do_mod_timer: + if (smi_result != SI_SM_IDLE) + mod_timer(&(smi_info->si_timer), timeout); } static irqreturn_t si_irq_handler(int irq, void *data) @@ -1144,10 +1166,10 @@ static int smi_start_processing(void *send_info, new_smi->thread = kthread_run(ipmi_thread, new_smi, "kipmi%d", new_smi->intf_num); if (IS_ERR(new_smi->thread)) { - printk(KERN_NOTICE "ipmi_si_intf: Could not start" - " kernel thread due to error %ld, only using" - " timers to drive the interface\n", - PTR_ERR(new_smi->thread)); + dev_notice(new_smi->dev, "Could not start" + " kernel thread due to error %ld, only using" + " timers to drive the interface\n", + PTR_ERR(new_smi->thread)); new_smi->thread = NULL; } } @@ -1308,14 +1330,13 @@ static int std_irq_setup(struct smi_info *info) DEVICE_NAME, info); if (rv) { - printk(KERN_WARNING - "ipmi_si: %s unable to claim interrupt %d," - " running polled\n", - DEVICE_NAME, info->irq); + dev_warn(info->dev, "%s unable to claim interrupt %d," + " running polled\n", + DEVICE_NAME, info->irq); info->irq = 0; } else { info->irq_cleanup = std_irq_cleanup; - printk(" Using irq %d\n", info->irq); + dev_info(info->dev, "Using irq %d\n", info->irq); } return rv; @@ -1406,8 +1427,8 @@ static int port_setup(struct smi_info *info) info->io.outputb = port_outl; break; default: - printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", - info->io.regsize); + dev_warn(info->dev, "Invalid register size: %d\n", + info->io.regsize); return -EINVAL; } @@ -1529,8 +1550,8 @@ static int mem_setup(struct smi_info *info) break; #endif default: - printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", - info->io.regsize); + dev_warn(info->dev, "Invalid 
register size: %d\n", + info->io.regsize); return -EINVAL; } @@ -1755,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) goto out; } - info->addr_source = "hotmod"; + info->addr_source = SI_HOTMOD; info->si_type = si_type; info->io.addr_data = addr; info->io.addr_type = addr_space; @@ -1777,7 +1798,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) info->irq_setup = std_irq_setup; info->slave_addr = ipmb; - try_smi_init(info); + if (!add_smi(info)) + if (try_smi_init(info)) + cleanup_one_si(info); } else { /* remove */ struct smi_info *e, *tmp_e; @@ -1813,7 +1836,8 @@ static __devinit void hardcode_find_bmc(void) if (!info) return; - info->addr_source = "hardcoded"; + info->addr_source = SI_HARDCODED; + printk(KERN_INFO PFX "probing via hardcoded address\n"); if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { info->si_type = SI_KCS; @@ -1822,8 +1846,7 @@ static __devinit void hardcode_find_bmc(void) } else if (strcmp(si_type[i], "bt") == 0) { info->si_type = SI_BT; } else { - printk(KERN_WARNING - "ipmi_si: Interface type specified " + printk(KERN_WARNING PFX "Interface type specified " "for interface %d, was invalid: %s\n", i, si_type[i]); kfree(info); @@ -1841,11 +1864,9 @@ static __devinit void hardcode_find_bmc(void) info->io.addr_data = addrs[i]; info->io.addr_type = IPMI_MEM_ADDR_SPACE; } else { - printk(KERN_WARNING - "ipmi_si: Interface type specified " - "for interface %d, " - "but port and address were not set or " - "set to zero.\n", i); + printk(KERN_WARNING PFX "Interface type specified " + "for interface %d, but port and address were " + "not set or set to zero.\n", i); kfree(info); continue; } @@ -1863,7 +1884,9 @@ static __devinit void hardcode_find_bmc(void) info->irq_setup = std_irq_setup; info->slave_addr = slave_addrs[i]; - try_smi_init(info); + if (!add_smi(info)) + if (try_smi_init(info)) + cleanup_one_si(info); } } @@ -1923,15 +1946,13 @@ static int acpi_gpe_irq_setup(struct smi_info *info) &ipmi_acpi_gpe, info); if (status != AE_OK) { - printk(KERN_WARNING - "ipmi_si: %s unable to claim ACPI GPE %d," - " running polled\n", - DEVICE_NAME, info->irq); + dev_warn(info->dev, "%s unable to claim ACPI GPE %d," + " running polled\n", DEVICE_NAME, info->irq); info->irq = 0; return -EINVAL; } else { info->irq_cleanup = acpi_gpe_irq_cleanup; - printk(" Using ACPI GPE %d\n", info->irq); + dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); return 0; } } @@ -1989,8 +2010,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) u8 addr_space; if (spmi->IPMIlegacy != 1) { - printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); - return -ENODEV; + printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); + return -ENODEV; } if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) @@ -2000,11 +2021,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { - printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); + printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); return -ENOMEM; } - info->addr_source = "SPMI"; + info->addr_source = SI_SPMI; + printk(KERN_INFO PFX "probing via SPMI\n"); /* Figure out the interface type. 
*/ switch (spmi->InterfaceType) { @@ -2018,8 +2040,8 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info->si_type = SI_BT; break; default: - printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", - spmi->InterfaceType); + printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", + spmi->InterfaceType); kfree(info); return -EIO; } @@ -2055,13 +2077,12 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) info->io.addr_type = IPMI_IO_ADDR_SPACE; } else { kfree(info); - printk(KERN_WARNING - "ipmi_si: Unknown ACPI I/O Address type\n"); + printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); return -EIO; } info->io.addr_data = spmi->addr.address; - try_smi_init(info); + add_smi(info); return 0; } @@ -2093,6 +2114,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, { struct acpi_device *acpi_dev; struct smi_info *info; + struct resource *res; acpi_handle handle; acpi_status status; unsigned long long tmp; @@ -2105,7 +2127,8 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, if (!info) return -ENOMEM; - info->addr_source = "ACPI"; + info->addr_source = SI_ACPI; + printk(KERN_INFO PFX "probing via ACPI\n"); handle = acpi_dev->handle; @@ -2125,22 +2148,26 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->si_type = SI_BT; break; default: - dev_info(&dev->dev, "unknown interface type %lld\n", tmp); + dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); goto err_free; } - if (pnp_port_valid(dev, 0)) { + res = pnp_get_resource(dev, IORESOURCE_IO, 0); + if (res) { info->io_setup = port_setup; info->io.addr_type = IPMI_IO_ADDR_SPACE; - info->io.addr_data = pnp_port_start(dev, 0); - } else if (pnp_mem_valid(dev, 0)) { - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - info->io.addr_data = pnp_mem_start(dev, 0); } else { + res = pnp_get_resource(dev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } + } + if (!res) { dev_err(&dev->dev, "no I/O or memory address\n"); goto err_free; } + info->io.addr_data = res->start; info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = DEFAULT_REGSPACING; @@ -2156,10 +2183,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->irq_setup = std_irq_setup; } - info->dev = &acpi_dev->dev; + info->dev = &dev->dev; pnp_set_drvdata(dev, info); - return try_smi_init(info); + dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", + res, info->io.regsize, info->io.regspacing, + info->irq); + + return add_smi(info); err_free: kfree(info); @@ -2264,12 +2295,12 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { - printk(KERN_ERR - "ipmi_si: Could not allocate SI data\n"); + printk(KERN_ERR PFX "Could not allocate SI data\n"); return; } - info->addr_source = "SMBIOS"; + info->addr_source = SI_SMBIOS; + printk(KERN_INFO PFX "probing via SMBIOS\n"); switch (ipmi_data->type) { case 0x01: /* KCS */ @@ -2299,8 +2330,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) default: kfree(info); - printk(KERN_WARNING - "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", + printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", ipmi_data->addr_space); return; } @@ -2318,7 +2348,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) if (info->irq) info->irq_setup = std_irq_setup; - try_smi_init(info); + add_smi(info); } static void __devinit dmi_find_bmc(void) @@ -2368,7 +2398,8 @@ static int __devinit 
ipmi_pci_probe(struct pci_dev *pdev, if (!info) return -ENOMEM; - info->addr_source = "PCI"; + info->addr_source = SI_PCI; + dev_info(&pdev->dev, "probing via PCI"); switch (class_type) { case PCI_ERMC_CLASSCODE_TYPE_SMIC: @@ -2385,15 +2416,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, default: kfree(info); - printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", - pci_name(pdev), class_type); + dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); return -ENOMEM; } rv = pci_enable_device(pdev); if (rv) { - printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", - pci_name(pdev)); + dev_err(&pdev->dev, "couldn't enable PCI device\n"); kfree(info); return rv; } @@ -2421,7 +2450,11 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, info->dev = &pdev->dev; pci_set_drvdata(pdev, info); - return try_smi_init(info); + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], info->io.regsize, info->io.regspacing, + info->irq); + + return add_smi(info); } static void __devexit ipmi_pci_remove(struct pci_dev *pdev) @@ -2473,7 +2506,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev, int ret; int proplen; - dev_info(&dev->dev, PFX "probing via device tree\n"); + dev_info(&dev->dev, "probing via device tree\n"); ret = of_address_to_resource(np, 0, &resource); if (ret) { @@ -2503,12 +2536,12 @@ static int __devinit ipmi_of_probe(struct of_device *dev, if (!info) { dev_err(&dev->dev, - PFX "could not allocate memory for OF probe\n"); + "could not allocate memory for OF probe\n"); return -ENOMEM; } info->si_type = (enum si_type) match->data; - info->addr_source = "device-tree"; + info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; if (resource.flags & IORESOURCE_IO) { @@ -2528,13 +2561,13 @@ static int __devinit ipmi_of_probe(struct of_device *dev, info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); info->dev = &dev->dev; - dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n", + dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", info->io.addr_data, info->io.regsize, info->io.regspacing, info->irq); dev_set_drvdata(&dev->dev, info); - return try_smi_init(info); + return add_smi(info); } static int __devexit ipmi_of_remove(struct of_device *dev) @@ -2643,9 +2676,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING - "ipmi_si: Error getting response from get global," - " enables command, the event buffer is not" + printk(KERN_WARNING PFX "Error getting response from get" + " global enables command, the event buffer is not" " enabled.\n"); goto out; } @@ -2657,10 +2689,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || resp[2] != 0) { - printk(KERN_WARNING - "ipmi_si: Invalid return from get global" - " enables command, cannot enable the event" - " buffer.\n"); + printk(KERN_WARNING PFX "Invalid return from get global" + " enables command, cannot enable the event buffer.\n"); rv = -EINVAL; goto out; } @@ -2676,9 +2706,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info) rv = wait_for_msg_done(smi_info); if (rv) { - printk(KERN_WARNING - "ipmi_si: Error getting response from set global," - " enables command, the event buffer is not" + printk(KERN_WARNING PFX "Error getting response from set" + " global, enables command, the event buffer is not" " enabled.\n"); goto out; } @@ -2689,10 +2718,8 
@@ static int try_enable_event_buffer(struct smi_info *smi_info) if (resp_len < 3 || resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { - printk(KERN_WARNING - "ipmi_si: Invalid return from get global," - "enables command, not enable the event" - " buffer.\n"); + printk(KERN_WARNING PFX "Invalid return from get global," + "enables command, not enable the event buffer.\n"); rv = -EINVAL; goto out; } @@ -2951,7 +2978,7 @@ static __devinit void default_find_bmc(void) if (!info) return; - info->addr_source = NULL; + info->addr_source = SI_DEFAULT; info->si_type = ipmi_defaults[i].type; info->io_setup = port_setup; @@ -2963,14 +2990,16 @@ static __devinit void default_find_bmc(void) info->io.regsize = DEFAULT_REGSPACING; info->io.regshift = 0; - if (try_smi_init(info) == 0) { - /* Found one... */ - printk(KERN_INFO "ipmi_si: Found default %s state" - " machine at %s address 0x%lx\n", - si_to_str[info->si_type], - addr_space_to_str[info->io.addr_type], - info->io.addr_data); - return; + if (add_smi(info) == 0) { + if ((try_smi_init(info)) == 0) { + /* Found one... */ + printk(KERN_INFO PFX "Found default %s" + " state machine at %s address 0x%lx\n", + si_to_str[info->si_type], + addr_space_to_str[info->io.addr_type], + info->io.addr_data); + } else + cleanup_one_si(info); } } } @@ -2989,34 +3018,48 @@ static int is_new_interface(struct smi_info *info) return 1; } -static int try_smi_init(struct smi_info *new_smi) +static int add_smi(struct smi_info *new_smi) { - int rv; - int i; - - if (new_smi->addr_source) { - printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" - " machine at %s address 0x%lx, slave address 0x%x," - " irq %d\n", - new_smi->addr_source, - si_to_str[new_smi->si_type], - addr_space_to_str[new_smi->io.addr_type], - new_smi->io.addr_data, - new_smi->slave_addr, new_smi->irq); - } + int rv = 0; + printk(KERN_INFO PFX "Adding %s-specified %s state machine", + ipmi_addr_src_to_str[new_smi->addr_source], + si_to_str[new_smi->si_type]); mutex_lock(&smi_infos_lock); if (!is_new_interface(new_smi)) { - printk(KERN_WARNING "ipmi_si: duplicate interface\n"); + printk(KERN_CONT PFX "duplicate interface\n"); rv = -EBUSY; goto out_err; } + printk(KERN_CONT "\n"); + /* So we know not to free it unless we have allocated one. */ new_smi->intf = NULL; new_smi->si_sm = NULL; new_smi->handlers = NULL; + list_add_tail(&new_smi->link, &smi_infos); + +out_err: + mutex_unlock(&smi_infos_lock); + return rv; +} + +static int try_smi_init(struct smi_info *new_smi) +{ + int rv = 0; + int i; + + printk(KERN_INFO PFX "Trying %s-specified %s state" + " machine at %s address 0x%lx, slave address 0x%x," + " irq %d\n", + ipmi_addr_src_to_str[new_smi->addr_source], + si_to_str[new_smi->si_type], + addr_space_to_str[new_smi->io.addr_type], + new_smi->io.addr_data, + new_smi->slave_addr, new_smi->irq); + switch (new_smi->si_type) { case SI_KCS: new_smi->handlers = &kcs_smi_handlers; @@ -3039,7 +3082,8 @@ static int try_smi_init(struct smi_info *new_smi) /* Allocate the state machine's data and initialize it. */ new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); if (!new_smi->si_sm) { - printk(KERN_ERR "Could not allocate state machine memory\n"); + printk(KERN_ERR PFX + "Could not allocate state machine memory\n"); rv = -ENOMEM; goto out_err; } @@ -3049,7 +3093,7 @@ static int try_smi_init(struct smi_info *new_smi) /* Now that we know the I/O size, we can set up the I/O. 
*/ rv = new_smi->io_setup(new_smi); if (rv) { - printk(KERN_ERR "Could not set up I/O space\n"); + printk(KERN_ERR PFX "Could not set up I/O space\n"); goto out_err; } @@ -3059,8 +3103,7 @@ static int try_smi_init(struct smi_info *new_smi) /* Do low-level detection first. */ if (new_smi->handlers->detect(new_smi->si_sm)) { if (new_smi->addr_source) - printk(KERN_INFO "ipmi_si: Interface detection" - " failed\n"); + printk(KERN_INFO PFX "Interface detection failed\n"); rv = -ENODEV; goto out_err; } @@ -3072,7 +3115,7 @@ static int try_smi_init(struct smi_info *new_smi) rv = try_get_dev_id(new_smi); if (rv) { if (new_smi->addr_source) - printk(KERN_INFO "ipmi_si: There appears to be no BMC" + printk(KERN_INFO PFX "There appears to be no BMC" " at this location\n"); goto out_err; } @@ -3088,7 +3131,7 @@ static int try_smi_init(struct smi_info *new_smi) for (i = 0; i < SI_NUM_STATS; i++) atomic_set(&new_smi->stats[i], 0); - new_smi->interrupt_disabled = 0; + new_smi->interrupt_disabled = 1; atomic_set(&new_smi->stop_operation, 0); new_smi->intf_num = smi_num; smi_num++; @@ -3114,9 +3157,8 @@ static int try_smi_init(struct smi_info *new_smi) new_smi->pdev = platform_device_alloc("ipmi_si", new_smi->intf_num); if (!new_smi->pdev) { - printk(KERN_ERR - "ipmi_si_intf:" - " Unable to allocate platform device\n"); + printk(KERN_ERR PFX + "Unable to allocate platform device\n"); goto out_err; } new_smi->dev = &new_smi->pdev->dev; @@ -3124,9 +3166,8 @@ static int try_smi_init(struct smi_info *new_smi) rv = platform_device_add(new_smi->pdev); if (rv) { - printk(KERN_ERR - "ipmi_si_intf:" - " Unable to register system interface device:" + printk(KERN_ERR PFX + "Unable to register system interface device:" " %d\n", rv); goto out_err; @@ -3141,9 +3182,8 @@ static int try_smi_init(struct smi_info *new_smi) "bmc", new_smi->slave_addr); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to register device: error %d\n", - rv); + dev_err(new_smi->dev, "Unable to register device: error %d\n", + rv); goto out_err_stop_timer; } @@ -3151,9 +3191,7 @@ static int try_smi_init(struct smi_info *new_smi) type_file_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } @@ -3161,9 +3199,7 @@ static int try_smi_init(struct smi_info *new_smi) stat_file_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } @@ -3171,18 +3207,12 @@ static int try_smi_init(struct smi_info *new_smi) param_read_proc, new_smi); if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to create proc entry: %d\n", - rv); + dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); goto out_err_stop_timer; } - list_add_tail(&new_smi->link, &smi_infos); - - mutex_unlock(&smi_infos_lock); - - printk(KERN_INFO "IPMI %s interface initialized\n", - si_to_str[new_smi->si_type]); + dev_info(new_smi->dev, "IPMI %s interface initialized\n", + si_to_str[new_smi->si_type]); return 0; @@ -3191,11 +3221,17 @@ static int try_smi_init(struct smi_info *new_smi) wait_for_timer_and_thread(new_smi); out_err: - if (new_smi->intf) + new_smi->interrupt_disabled = 1; + + if (new_smi->intf) { ipmi_unregister_smi(new_smi->intf); + new_smi->intf = NULL; + } - if (new_smi->irq_cleanup) + if (new_smi->irq_cleanup) { new_smi->irq_cleanup(new_smi); + new_smi->irq_cleanup = NULL; + } /* * Wait until we know 
that we are out of any interrupt @@ -3208,18 +3244,21 @@ static int try_smi_init(struct smi_info *new_smi) if (new_smi->handlers) new_smi->handlers->cleanup(new_smi->si_sm); kfree(new_smi->si_sm); + new_smi->si_sm = NULL; } - if (new_smi->addr_source_cleanup) + if (new_smi->addr_source_cleanup) { new_smi->addr_source_cleanup(new_smi); - if (new_smi->io_cleanup) + new_smi->addr_source_cleanup = NULL; + } + if (new_smi->io_cleanup) { new_smi->io_cleanup(new_smi); + new_smi->io_cleanup = NULL; + } - if (new_smi->dev_registered) + if (new_smi->dev_registered) { platform_device_unregister(new_smi->pdev); - - kfree(new_smi); - - mutex_unlock(&smi_infos_lock); + new_smi->dev_registered = 0; + } return rv; } @@ -3229,6 +3268,8 @@ static __devinit int init_ipmi_si(void) int i; char *str; int rv; + struct smi_info *e; + enum ipmi_addr_src type = SI_INVALID; if (initialized) return 0; @@ -3237,9 +3278,7 @@ static __devinit int init_ipmi_si(void) /* Register the device drivers. */ rv = driver_register(&ipmi_driver.driver); if (rv) { - printk(KERN_ERR - "init_ipmi_si: Unable to register driver: %d\n", - rv); + printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); return rv; } @@ -3263,38 +3302,81 @@ static __devinit int init_ipmi_si(void) hardcode_find_bmc(); -#ifdef CONFIG_DMI - dmi_find_bmc(); -#endif + /* If the user gave us a device, they presumably want us to use it */ + mutex_lock(&smi_infos_lock); + if (!list_empty(&smi_infos)) { + mutex_unlock(&smi_infos_lock); + return 0; + } + mutex_unlock(&smi_infos_lock); -#ifdef CONFIG_ACPI - spmi_find_bmc(); +#ifdef CONFIG_PCI + rv = pci_register_driver(&ipmi_pci_driver); + if (rv) + printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); #endif + #ifdef CONFIG_ACPI pnp_register_driver(&ipmi_pnp_driver); #endif -#ifdef CONFIG_PCI - rv = pci_register_driver(&ipmi_pci_driver); - if (rv) - printk(KERN_ERR - "init_ipmi_si: Unable to register PCI driver: %d\n", - rv); +#ifdef CONFIG_DMI + dmi_find_bmc(); +#endif + +#ifdef CONFIG_ACPI + spmi_find_bmc(); #endif #ifdef CONFIG_PPC_OF of_register_platform_driver(&ipmi_of_platform_driver); #endif + /* We prefer devices with interrupts, but in the case of a machine + with multiple BMCs we assume that there will be several instances + of a given type so if we succeed in registering a type then also + try to register everything else of the same type */ + + mutex_lock(&smi_infos_lock); + list_for_each_entry(e, &smi_infos, link) { + /* Try to register a device if it has an IRQ and we either + haven't successfully registered a device yet or this + device has the same type as one we successfully registered */ + if (e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + + /* type will only have been set if we successfully registered an si */ + if (type) { + mutex_unlock(&smi_infos_lock); + return 0; + } + + /* Fall back to the preferred device */ + + list_for_each_entry(e, &smi_infos, link) { + if (!e->irq && (!type || e->addr_source == type)) { + if (!try_smi_init(e)) { + type = e->addr_source; + } + } + } + mutex_unlock(&smi_infos_lock); + + if (type) + return 0; + if (si_trydefaults) { mutex_lock(&smi_infos_lock); if (list_empty(&smi_infos)) { /* No BMC was found, try defaults. 
*/ mutex_unlock(&smi_infos_lock); default_find_bmc(); - } else { + } else mutex_unlock(&smi_infos_lock); - } } mutex_lock(&smi_infos_lock); @@ -3308,8 +3390,8 @@ static __devinit int init_ipmi_si(void) of_unregister_platform_driver(&ipmi_of_platform_driver); #endif driver_unregister(&ipmi_driver.driver); - printk(KERN_WARNING - "ipmi_si: Unable to find any System Interface(s)\n"); + printk(KERN_WARNING PFX + "Unable to find any System Interface(s)\n"); return -ENODEV; } else { mutex_unlock(&smi_infos_lock); @@ -3320,7 +3402,7 @@ module_init(init_ipmi_si); static void cleanup_one_si(struct smi_info *to_clean) { - int rv; + int rv = 0; unsigned long flags; if (!to_clean) @@ -3364,14 +3446,16 @@ static void cleanup_one_si(struct smi_info *to_clean) schedule_timeout_uninterruptible(1); } - rv = ipmi_unregister_smi(to_clean->intf); + if (to_clean->intf) + rv = ipmi_unregister_smi(to_clean->intf); + if (rv) { - printk(KERN_ERR - "ipmi_si: Unable to unregister device: errno=%d\n", + printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", rv); } - to_clean->handlers->cleanup(to_clean->si_sm); + if (to_clean->handlers) + to_clean->handlers->cleanup(to_clean->si_sm); kfree(to_clean->si_sm); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index fdd37543aa79..02abfddce45a 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -287,12 +287,10 @@ static int register_device (int minor, struct pp_struct *pp) char *name; int fl; - name = kmalloc (strlen (CHRDEV) + 3, GFP_KERNEL); + name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; - sprintf (name, CHRDEV "%x", minor); - port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c index 606048b72bcf..85c004a518ee 100644 --- a/drivers/char/ps3flash.c +++ b/drivers/char/ps3flash.c @@ -305,8 +305,7 @@ static int ps3flash_flush(struct file *file, fl_owner_t id) return ps3flash_writeback(ps3flash_dev); } -static int ps3flash_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int ps3flash_fsync(struct file *file, int datasync) { return ps3flash_writeback(ps3flash_dev); } diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c new file mode 100644 index 000000000000..74f00b5ffa36 --- /dev/null +++ b/drivers/char/ramoops.c @@ -0,0 +1,162 @@ +/* + * RAM Oops/Panic logger + * + * Copyright (C) 2010 Marco Stornelli + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#define RAMOOPS_KERNMSG_HDR "====" +#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval)) + +#define RECORD_SIZE 4096 + +static ulong mem_address; +module_param(mem_address, ulong, 0400); +MODULE_PARM_DESC(mem_address, + "start of reserved RAM used to store oops/panic logs"); + +static ulong mem_size; +module_param(mem_size, ulong, 0400); +MODULE_PARM_DESC(mem_size, + "size of reserved RAM used to store oops/panic logs"); + +static int dump_oops = 1; +module_param(dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + +static struct ramoops_context { + struct kmsg_dumper dump; + void *virt_addr; + phys_addr_t phys_addr; + unsigned long size; + int count; + int max_count; +} oops_cxt; + +static void ramoops_do_dump(struct kmsg_dumper *dumper, + enum kmsg_dump_reason reason, const char *s1, unsigned long l1, + const char *s2, unsigned long l2) +{ + struct ramoops_context *cxt = container_of(dumper, + struct ramoops_context, dump); + unsigned long s1_start, s2_start; + unsigned long l1_cpy, l2_cpy; + int res; + char *buf; + struct timeval timestamp; + + /* Only dump oopses if dump_oops is set */ + if (reason == KMSG_DUMP_OOPS && !dump_oops) + return; + + buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); + memset(buf, '\0', RECORD_SIZE); + res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR); + buf += res; + do_gettimeofday(×tamp); + res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec); + buf += res; + + l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE)); + l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy); + + s2_start = l2 - l2_cpy; + s1_start = l1 - l1_cpy; + + memcpy(buf, s1 + s1_start, l1_cpy); + memcpy(buf + l1_cpy, s2 + s2_start, l2_cpy); + + cxt->count = (cxt->count + 1) % cxt->max_count; +} + +static int __init ramoops_init(void) +{ + struct ramoops_context *cxt = &oops_cxt; + int err = -EINVAL; + + if (!mem_size) { + printk(KERN_ERR "ramoops: invalid size specification"); + goto fail3; + } + + rounddown_pow_of_two(mem_size); + + if (mem_size < RECORD_SIZE) { + printk(KERN_ERR "ramoops: size too small"); + goto fail3; + } + + cxt->max_count = mem_size / RECORD_SIZE; + cxt->count = 0; + cxt->size = mem_size; + cxt->phys_addr = mem_address; + + if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { + printk(KERN_ERR "ramoops: request mem region failed"); + err = -EINVAL; + goto fail3; + } + + cxt->virt_addr = ioremap(cxt->phys_addr, cxt->size); + if (!cxt->virt_addr) { + printk(KERN_ERR "ramoops: ioremap failed"); + goto fail2; + } + + cxt->dump.dump = ramoops_do_dump; + err = kmsg_dump_register(&cxt->dump); + if (err) { + printk(KERN_ERR "ramoops: registering kmsg dumper failed"); + goto fail1; + } + + return 0; + +fail1: + iounmap(cxt->virt_addr); +fail2: + release_mem_region(cxt->phys_addr, cxt->size); +fail3: + return err; +} + +static void __exit ramoops_exit(void) +{ + struct ramoops_context *cxt = &oops_cxt; + + if (kmsg_dump_unregister(&cxt->dump) < 0) + printk(KERN_WARNING "ramoops: could not unregister kmsg_dumper"); + + iounmap(cxt->virt_addr); + release_mem_region(cxt->phys_addr, cxt->size); +} + + +module_init(ramoops_init); 
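The dumper above stores each crash log in a fixed 4 KiB slot of the reserved region: a record begins with the "====" marker and a seconds.microseconds timestamp, and cxt->count wraps modulo max_count, so the area behaves as a ring of mem_size / RECORD_SIZE slots. A rough sketch of how those slots could be walked after a reboot, assuming the region has been mapped the same way the driver maps it (the helper below is illustrative only and not part of the patch):

        #include <linux/kernel.h>
        #include <linux/string.h>

        /* Illustration: list whatever records ramoops left behind. */
        static void ramoops_walk(void *region, unsigned long size)
        {
                int i, slots = size / 4096;             /* RECORD_SIZE */
                char *rec;

                for (i = 0; i < slots; i++) {
                        rec = (char *)region + i * 4096;
                        if (memcmp(rec, "====", 4))     /* RAMOOPS_KERNMSG_HDR */
                                continue;               /* slot was never written */
                        /* timestamp and log text follow the marker */
                        pr_info("ramoops slot %d: %.64s...\n", i, rec + 4);
                }
        }

Loading the module needs only the two address parameters, along the lines of "modprobe ramoops mem_address=0x... mem_size=0x...", where the address must point at RAM the platform keeps out of the normal allocator.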
+module_exit(ramoops_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marco Stornelli "); +MODULE_DESCRIPTION("RAM Oops/Panic logger/driver"); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index bd1d1164fec5..7cdb6ee569cd 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -3967,13 +3967,9 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) font.charcount = op->charcount; font.height = op->height; font.width = op->width; - font.data = kmalloc(size, GFP_KERNEL); - if (!font.data) - return -ENOMEM; - if (copy_from_user(font.data, op->data, size)) { - kfree(font.data); - return -EFAULT; - } + font.data = memdup_user(op->data, size); + if (IS_ERR(font.data)) + return PTR_ERR(font.data); acquire_console_sem(); if (vc->vc_sw->con_font_set) rc = vc->vc_sw->con_font_set(vc, &font, op->flags); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 12fdd3987a36..199488576a05 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -156,7 +156,7 @@ int cpuidle_enable_device(struct cpuidle_device *dev) if (dev->enabled) return 0; - if (!cpuidle_curr_driver || !cpuidle_curr_governor) + if (!cpuidle_get_driver() || !cpuidle_curr_governor) return -EIO; if (!dev->state_count) return -EINVAL; @@ -207,7 +207,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) { if (!dev->enabled) return; - if (!cpuidle_curr_driver || !cpuidle_curr_governor) + if (!cpuidle_get_driver() || !cpuidle_curr_governor) return; dev->enabled = 0; @@ -271,10 +271,11 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) { int ret; struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); if (!sys_dev) return -EINVAL; - if (!try_module_get(cpuidle_curr_driver->owner)) + if (!try_module_get(cpuidle_driver->owner)) return -EINVAL; init_completion(&dev->kobj_unregister); @@ -284,7 +285,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); if ((ret = cpuidle_add_sysfs(sys_dev))) { - module_put(cpuidle_curr_driver->owner); + module_put(cpuidle_driver->owner); return ret; } @@ -325,6 +326,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); void cpuidle_unregister_device(struct cpuidle_device *dev) { struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); if (dev->registered == 0) return; @@ -340,7 +342,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) cpuidle_resume_and_unlock(); - module_put(cpuidle_curr_driver->owner); + module_put(cpuidle_driver->owner); } EXPORT_SYMBOL_GPL(cpuidle_unregister_device); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 9476ba33ee2c..33e50d556f17 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -9,7 +9,6 @@ /* For internal use only */ extern struct cpuidle_governor *cpuidle_curr_governor; -extern struct cpuidle_driver *cpuidle_curr_driver; extern struct list_head cpuidle_governors; extern struct list_head cpuidle_detected_devices; extern struct mutex cpuidle_lock; diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 2257004fe33d..fd1601e3d125 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -14,7 +14,7 @@ #include "cpuidle.h" -struct cpuidle_driver *cpuidle_curr_driver; +static struct cpuidle_driver *cpuidle_curr_driver; DEFINE_SPINLOCK(cpuidle_driver_lock); 
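Two small API points in the hunks above are easy to miss. The con_font_set() conversion relies on memdup_user(), which folds the usual kmalloc()/copy_from_user()/error-unwind sequence into a single call that hands back either a kernel copy of the user buffer or an ERR_PTR() value, and the cpuidle change makes cpuidle_curr_driver static so that every reader now goes through the new cpuidle_get_driver() accessor. A minimal sketch of the memdup_user() pattern (the handler below is hypothetical, not taken from this patch):

        #include <linux/string.h>
        #include <linux/err.h>
        #include <linux/slab.h>

        static int example_set_blob(void __user *uptr, size_t len)
        {
                char *buf;

                buf = memdup_user(uptr, len);   /* kmalloc() + copy_from_user() */
                if (IS_ERR(buf))
                        return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */

                /* ... consume buf ... */

                kfree(buf);
                return 0;
        }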
/** @@ -39,14 +39,26 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) EXPORT_SYMBOL_GPL(cpuidle_register_driver); +/** + * cpuidle_get_driver - return the current driver + */ +struct cpuidle_driver *cpuidle_get_driver(void) +{ + return cpuidle_curr_driver; +} +EXPORT_SYMBOL_GPL(cpuidle_get_driver); + /** * cpuidle_unregister_driver - unregisters a driver * @drv: the driver */ void cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (!drv) + if (drv != cpuidle_curr_driver) { + WARN(1, "invalid cpuidle_unregister_driver(%s)\n", + drv->name); return; + } spin_lock(&cpuidle_driver_lock); cpuidle_curr_driver = NULL; diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 0ba9c8b8ee74..0310ffaec9df 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -47,10 +47,11 @@ static ssize_t show_current_driver(struct sysdev_class *class, char *buf) { ssize_t ret; + struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); spin_lock(&cpuidle_driver_lock); - if (cpuidle_curr_driver) - ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name); + if (cpuidle_driver) + ret = sprintf(buf, "%s\n", cpuidle_driver->name); else ret = sprintf(buf, "none\n"); spin_unlock(&cpuidle_driver_lock); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1b8877922fb0..9e01e96fee94 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -166,6 +166,15 @@ config TIMB_DMA config ARCH_HAS_ASYNC_TX_FIND_CHANNEL bool +config PL330_DMA + tristate "DMA API Driver for PL330" + select DMA_ENGINE + depends on PL330 + help + Select if your platform has one or more PL330 DMACs. + You need to provide platform specific settings via + platform_data for a dma-pl330 device. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 20881426c1ac..0fe5ebbfda5d 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -22,3 +22,4 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_TIMB_DMA) += timb_dma.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o +obj-$(CONFIG_PL330_DMA) += pl330.o diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c new file mode 100644 index 000000000000..7c50f6dfd3f4 --- /dev/null +++ b/drivers/dma/pl330.c @@ -0,0 +1,866 @@ +/* linux/drivers/dma/pl330.c + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define NR_DEFAULT_DESC 16 + +enum desc_status { + /* In the DMAC pool */ + FREE, + /* + * Allocted to some channel during prep_xxx + * Also may be sitting on the work_list. + */ + PREP, + /* + * Sitting on the work_list and already submitted + * to the PL330 core. Not more than two descriptors + * of a channel can be BUSY at any time. 
+ */ + BUSY, + /* + * Sitting on the channel work_list but xfer done + * by PL330 core + */ + DONE, +}; + +struct dma_pl330_chan { + /* Schedule desc completion */ + struct tasklet_struct task; + + /* DMA-Engine Channel */ + struct dma_chan chan; + + /* Last completed cookie */ + dma_cookie_t completed; + + /* List of to be xfered descriptors */ + struct list_head work_list; + + /* Pointer to the DMAC that manages this channel, + * NULL if the channel is available to be acquired. + * As the parent, this DMAC also provides descriptors + * to the channel. + */ + struct dma_pl330_dmac *dmac; + + /* To protect channel manipulation */ + spinlock_t lock; + + /* Token of a hardware channel thread of PL330 DMAC + * NULL if the channel is available to be acquired. + */ + void *pl330_chid; +}; + +struct dma_pl330_dmac { + struct pl330_info pif; + + /* DMA-Engine Device */ + struct dma_device ddma; + + /* Pool of descriptors available for the DMAC's channels */ + struct list_head desc_pool; + /* To protect desc_pool manipulation */ + spinlock_t pool_lock; + + /* Peripheral channels connected to this DMAC */ + struct dma_pl330_chan peripherals[0]; /* keep at end */ +}; + +struct dma_pl330_desc { + /* To attach to a queue as child */ + struct list_head node; + + /* Descriptor for the DMA Engine API */ + struct dma_async_tx_descriptor txd; + + /* Xfer for PL330 core */ + struct pl330_xfer px; + + struct pl330_reqcfg rqcfg; + struct pl330_req req; + + enum desc_status status; + + /* The channel which currently holds this desc */ + struct dma_pl330_chan *pchan; +}; + +static inline struct dma_pl330_chan * +to_pchan(struct dma_chan *ch) +{ + if (!ch) + return NULL; + + return container_of(ch, struct dma_pl330_chan, chan); +} + +static inline struct dma_pl330_desc * +to_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct dma_pl330_desc, txd); +} + +static inline void free_desc_list(struct list_head *list) +{ + struct dma_pl330_dmac *pdmac; + struct dma_pl330_desc *desc; + struct dma_pl330_chan *pch; + unsigned long flags; + + if (list_empty(list)) + return; + + /* Finish off the work list */ + list_for_each_entry(desc, list, node) { + dma_async_tx_callback callback; + void *param; + + /* All desc in a list belong to same channel */ + pch = desc->pchan; + callback = desc->txd.callback; + param = desc->txd.callback_param; + + if (callback) + callback(param); + + desc->pchan = NULL; + } + + pdmac = pch->dmac; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + list_splice_tail_init(list, &pdmac->desc_pool); + spin_unlock_irqrestore(&pdmac->pool_lock, flags); +} + +static inline void fill_queue(struct dma_pl330_chan *pch) +{ + struct dma_pl330_desc *desc; + int ret; + + list_for_each_entry(desc, &pch->work_list, node) { + + /* If already submitted */ + if (desc->status == BUSY) + break; + + ret = pl330_submit_req(pch->pl330_chid, + &desc->req); + if (!ret) { + desc->status = BUSY; + break; + } else if (ret == -EAGAIN) { + /* QFull or DMAC Dying */ + break; + } else { + /* Unacceptable request */ + desc->status = DONE; + dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", + __func__, __LINE__, desc->txd.cookie); + tasklet_schedule(&pch->task); + } + } +} + +static void pl330_tasklet(unsigned long data) +{ + struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; + struct dma_pl330_desc *desc, *_dt; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&pch->lock, flags); + + /* Pick up ripe tomatoes */ + list_for_each_entry_safe(desc, _dt, &pch->work_list, node) + if 
(desc->status == DONE) { + pch->completed = desc->txd.cookie; + list_move_tail(&desc->node, &list); + } + + /* Try to submit a req imm. next to the last completed cookie */ + fill_queue(pch); + + /* Make sure the PL330 Channel thread is active */ + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); + + spin_unlock_irqrestore(&pch->lock, flags); + + free_desc_list(&list); +} + +static void dma_pl330_rqcb(void *token, enum pl330_op_err err) +{ + struct dma_pl330_desc *desc = token; + struct dma_pl330_chan *pch = desc->pchan; + unsigned long flags; + + /* If desc aborted */ + if (!pch) + return; + + spin_lock_irqsave(&pch->lock, flags); + + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + tasklet_schedule(&pch->task); +} + +static int pl330_alloc_chan_resources(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_dmac *pdmac = pch->dmac; + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + pch->completed = chan->cookie = 1; + + pch->pl330_chid = pl330_request_channel(&pdmac->pif); + if (!pch->pl330_chid) { + spin_unlock_irqrestore(&pch->lock, flags); + return 0; + } + + tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); + + spin_unlock_irqrestore(&pch->lock, flags); + + return 1; +} + +static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_desc *desc; + unsigned long flags; + + /* Only supports DMA_TERMINATE_ALL */ + if (cmd != DMA_TERMINATE_ALL) + return -ENXIO; + + spin_lock_irqsave(&pch->lock, flags); + + /* FLUSH the PL330 Channel thread */ + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); + + /* Mark all desc done */ + list_for_each_entry(desc, &pch->work_list, node) + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + pl330_tasklet((unsigned long) pch); + + return 0; +} + +static void pl330_free_chan_resources(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + tasklet_kill(&pch->task); + + pl330_release_channel(pch->pl330_chid); + pch->pl330_chid = NULL; + + spin_unlock_irqrestore(&pch->lock, flags); +} + +static enum dma_status +pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + dma_cookie_t last_done, last_used; + int ret; + + last_done = pch->completed; + last_used = chan->cookie; + + ret = dma_async_is_complete(cookie, last_done, last_used); + + dma_set_tx_state(txstate, last_done, last_used, 0); + + return ret; +} + +static void pl330_issue_pending(struct dma_chan *chan) +{ + pl330_tasklet((unsigned long) to_pchan(chan)); +} + +/* + * We returned the last one of the circular list of descriptor(s) + * from prep_xxx, so the argument to submit corresponds to the last + * descriptor of the list. 
+ */ +static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_pl330_desc *desc, *last = to_desc(tx); + struct dma_pl330_chan *pch = to_pchan(tx->chan); + dma_cookie_t cookie; + unsigned long flags; + + spin_lock_irqsave(&pch->lock, flags); + + /* Assign cookies to all nodes */ + cookie = tx->chan->cookie; + + while (!list_empty(&last->node)) { + desc = list_entry(last->node.next, struct dma_pl330_desc, node); + + if (++cookie < 0) + cookie = 1; + desc->txd.cookie = cookie; + + list_move_tail(&desc->node, &pch->work_list); + } + + if (++cookie < 0) + cookie = 1; + last->txd.cookie = cookie; + + list_add_tail(&last->node, &pch->work_list); + + tx->chan->cookie = cookie; + + spin_unlock_irqrestore(&pch->lock, flags); + + return cookie; +} + +static inline void _init_desc(struct dma_pl330_desc *desc) +{ + desc->pchan = NULL; + desc->req.x = &desc->px; + desc->req.token = desc; + desc->rqcfg.swap = SWAP_NO; + desc->rqcfg.privileged = 0; + desc->rqcfg.insnaccess = 0; + desc->rqcfg.scctl = SCCTRL0; + desc->rqcfg.dcctl = DCCTRL0; + desc->req.cfg = &desc->rqcfg; + desc->req.xfer_cb = dma_pl330_rqcb; + desc->txd.tx_submit = pl330_tx_submit; + + INIT_LIST_HEAD(&desc->node); +} + +/* Returns the number of descriptors added to the DMAC pool */ +int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) +{ + struct dma_pl330_desc *desc; + unsigned long flags; + int i; + + if (!pdmac) + return 0; + + desc = kmalloc(count * sizeof(*desc), flg); + if (!desc) + return 0; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + for (i = 0; i < count; i++) { + _init_desc(&desc[i]); + list_add_tail(&desc[i].node, &pdmac->desc_pool); + } + + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return count; +} + +static struct dma_pl330_desc * +pluck_desc(struct dma_pl330_dmac *pdmac) +{ + struct dma_pl330_desc *desc = NULL; + unsigned long flags; + + if (!pdmac) + return NULL; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + if (!list_empty(&pdmac->desc_pool)) { + desc = list_entry(pdmac->desc_pool.next, + struct dma_pl330_desc, node); + + list_del_init(&desc->node); + + desc->status = PREP; + desc->txd.callback = NULL; + } + + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return desc; +} + +static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) +{ + struct dma_pl330_dmac *pdmac = pch->dmac; + struct dma_pl330_peri *peri = pch->chan.private; + struct dma_pl330_desc *desc; + + /* Pluck one desc from the pool of DMAC */ + desc = pluck_desc(pdmac); + + /* If the DMAC pool is empty, alloc new */ + if (!desc) { + if (!add_desc(pdmac, GFP_ATOMIC, 1)) + return NULL; + + /* Try again */ + desc = pluck_desc(pdmac); + if (!desc) { + dev_err(pch->dmac->pif.dev, + "%s:%d ALERT!\n", __func__, __LINE__); + return NULL; + } + } + + /* Initialize the descriptor */ + desc->pchan = pch; + desc->txd.cookie = 0; + async_tx_ack(&desc->txd); + + desc->req.rqtype = peri->rqtype; + desc->req.peri = peri->peri_id; + + dma_async_tx_descriptor_init(&desc->txd, &pch->chan); + + return desc; +} + +static inline void fill_px(struct pl330_xfer *px, + dma_addr_t dst, dma_addr_t src, size_t len) +{ + px->next = NULL; + px->bytes = len; + px->dst_addr = dst; + px->src_addr = src; +} + +static struct dma_pl330_desc * +__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, + dma_addr_t src, size_t len) +{ + struct dma_pl330_desc *desc = pl330_get_desc(pch); + + if (!desc) { + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + return 
NULL; + } + + /* + * Ideally we should lookout for reqs bigger than + * those that can be programmed with 256 bytes of + * MC buffer, but considering a req size is seldom + * going to be word-unaligned and more than 200MB, + * we take it easy. + * Also, should the limit is reached we'd rather + * have the platform increase MC buffer size than + * complicating this API driver. + */ + fill_px(&desc->px, dst, src, len); + + return desc; +} + +/* Call after fixing burst size */ +static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) +{ + struct dma_pl330_chan *pch = desc->pchan; + struct pl330_info *pi = &pch->dmac->pif; + int burst_len; + + burst_len = pi->pcfg.data_bus_width / 8; + burst_len *= pi->pcfg.data_buf_dep; + burst_len >>= desc->rqcfg.brst_size; + + /* src/dst_burst_len can't be more than 16 */ + if (burst_len > 16) + burst_len = 16; + + while (burst_len > 1) { + if (!(len % (burst_len << desc->rqcfg.brst_size))) + break; + burst_len--; + } + + return burst_len; +} + +static struct dma_async_tx_descriptor * +pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct dma_pl330_desc *desc; + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_peri *peri = chan->private; + struct pl330_info *pi; + int burst; + + if (unlikely(!pch || !len || !peri)) + return NULL; + + if (peri->rqtype != MEMTOMEM) + return NULL; + + pi = &pch->dmac->pif; + + desc = __pl330_prep_dma_memcpy(pch, dst, src, len); + if (!desc) + return NULL; + + desc->rqcfg.src_inc = 1; + desc->rqcfg.dst_inc = 1; + + /* Select max possible burst size */ + burst = pi->pcfg.data_bus_width / 8; + + while (burst > 1) { + if (!(len % burst)) + break; + burst /= 2; + } + + desc->rqcfg.brst_size = 0; + while (burst != (1 << desc->rqcfg.brst_size)) + desc->rqcfg.brst_size++; + + desc->rqcfg.brst_len = get_burst_len(desc, len); + + desc->txd.flags = flags; + + return &desc->txd; +} + +static struct dma_async_tx_descriptor * +pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_data_direction direction, + unsigned long flg) +{ + struct dma_pl330_desc *first, *desc = NULL; + struct dma_pl330_chan *pch = to_pchan(chan); + struct dma_pl330_peri *peri = chan->private; + struct scatterlist *sg; + unsigned long flags; + int i, burst_size; + dma_addr_t addr; + + if (unlikely(!pch || !sgl || !sg_len)) + return NULL; + + /* Make sure the direction is consistent */ + if ((direction == DMA_TO_DEVICE && + peri->rqtype != MEMTODEV) || + (direction == DMA_FROM_DEVICE && + peri->rqtype != DEVTOMEM)) { + dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n", + __func__, __LINE__); + return NULL; + } + + addr = peri->fifo_addr; + burst_size = peri->burst_sz; + + first = NULL; + + for_each_sg(sgl, sg, sg_len, i) { + + desc = pl330_get_desc(pch); + if (!desc) { + struct dma_pl330_dmac *pdmac = pch->dmac; + + dev_err(pch->dmac->pif.dev, + "%s:%d Unable to fetch desc\n", + __func__, __LINE__); + if (!first) + return NULL; + + spin_lock_irqsave(&pdmac->pool_lock, flags); + + while (!list_empty(&first->node)) { + desc = list_entry(first->node.next, + struct dma_pl330_desc, node); + list_move_tail(&desc->node, &pdmac->desc_pool); + } + + list_move_tail(&first->node, &pdmac->desc_pool); + + spin_unlock_irqrestore(&pdmac->pool_lock, flags); + + return NULL; + } + + if (!first) + first = desc; + else + list_add_tail(&desc->node, &first->node); + + if (direction == DMA_TO_DEVICE) { + desc->rqcfg.src_inc = 1; + 
desc->rqcfg.dst_inc = 0; + fill_px(&desc->px, + addr, sg_dma_address(sg), sg_dma_len(sg)); + } else { + desc->rqcfg.src_inc = 0; + desc->rqcfg.dst_inc = 1; + fill_px(&desc->px, + sg_dma_address(sg), addr, sg_dma_len(sg)); + } + + desc->rqcfg.brst_size = burst_size; + desc->rqcfg.brst_len = 1; + } + + /* Return the last desc in the chain */ + desc->txd.flags = flg; + return &desc->txd; +} + +static irqreturn_t pl330_irq_handler(int irq, void *data) +{ + if (pl330_update(data)) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +static int __devinit +pl330_probe(struct amba_device *adev, struct amba_id *id) +{ + struct dma_pl330_platdata *pdat; + struct dma_pl330_dmac *pdmac; + struct dma_pl330_chan *pch; + struct pl330_info *pi; + struct dma_device *pd; + struct resource *res; + int i, ret, irq; + + pdat = adev->dev.platform_data; + + if (!pdat || !pdat->nr_valid_peri) { + dev_err(&adev->dev, "platform data missing\n"); + return -ENODEV; + } + + /* Allocate a new DMAC and its Channels */ + pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) + + sizeof(*pdmac), GFP_KERNEL); + if (!pdmac) { + dev_err(&adev->dev, "unable to allocate mem\n"); + return -ENOMEM; + } + + pi = &pdmac->pif; + pi->dev = &adev->dev; + pi->pl330_data = NULL; + pi->mcbufsz = pdat->mcbuf_sz; + + res = &adev->res; + request_mem_region(res->start, resource_size(res), "dma-pl330"); + + pi->base = ioremap(res->start, resource_size(res)); + if (!pi->base) { + ret = -ENXIO; + goto probe_err1; + } + + irq = adev->irq[0]; + ret = request_irq(irq, pl330_irq_handler, 0, + dev_name(&adev->dev), pi); + if (ret) + goto probe_err2; + + ret = pl330_add(pi); + if (ret) + goto probe_err3; + + INIT_LIST_HEAD(&pdmac->desc_pool); + spin_lock_init(&pdmac->pool_lock); + + /* Create a descriptor pool of default size */ + if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) + dev_warn(&adev->dev, "unable to allocate desc\n"); + + pd = &pdmac->ddma; + INIT_LIST_HEAD(&pd->channels); + + /* Initialize channel parameters */ + for (i = 0; i < pdat->nr_valid_peri; i++) { + struct dma_pl330_peri *peri = &pdat->peri[i]; + pch = &pdmac->peripherals[i]; + + switch (peri->rqtype) { + case MEMTOMEM: + dma_cap_set(DMA_MEMCPY, pd->cap_mask); + break; + case MEMTODEV: + case DEVTOMEM: + dma_cap_set(DMA_SLAVE, pd->cap_mask); + break; + default: + dev_err(&adev->dev, "DEVTODEV Not Supported\n"); + continue; + } + + INIT_LIST_HEAD(&pch->work_list); + spin_lock_init(&pch->lock); + pch->pl330_chid = NULL; + pch->chan.private = peri; + pch->chan.device = pd; + pch->chan.chan_id = i; + pch->dmac = pdmac; + + /* Add the channel to the DMAC list */ + pd->chancnt++; + list_add_tail(&pch->chan.device_node, &pd->channels); + } + + pd->dev = &adev->dev; + + pd->device_alloc_chan_resources = pl330_alloc_chan_resources; + pd->device_free_chan_resources = pl330_free_chan_resources; + pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; + pd->device_tx_status = pl330_tx_status; + pd->device_prep_slave_sg = pl330_prep_slave_sg; + pd->device_control = pl330_control; + pd->device_issue_pending = pl330_issue_pending; + + ret = dma_async_device_register(pd); + if (ret) { + dev_err(&adev->dev, "unable to register DMAC\n"); + goto probe_err4; + } + + amba_set_drvdata(adev, pdmac); + + dev_info(&adev->dev, + "Loaded driver for PL330 DMAC-%d\n", adev->periphid); + dev_info(&adev->dev, + "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", + pi->pcfg.data_buf_dep, + pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, + pi->pcfg.num_peri, pi->pcfg.num_events); + + return 0; + 
+probe_err4: + pl330_del(pi); +probe_err3: + free_irq(irq, pi); +probe_err2: + iounmap(pi->base); +probe_err1: + release_mem_region(res->start, resource_size(res)); + kfree(pdmac); + + return ret; +} + +static int __devexit pl330_remove(struct amba_device *adev) +{ + struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); + struct dma_pl330_chan *pch, *_p; + struct pl330_info *pi; + struct resource *res; + int irq; + + if (!pdmac) + return 0; + + amba_set_drvdata(adev, NULL); + + /* Idle the DMAC */ + list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + chan.device_node) { + + /* Remove the channel */ + list_del(&pch->chan.device_node); + + /* Flush the channel */ + pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_free_chan_resources(&pch->chan); + } + + pi = &pdmac->pif; + + pl330_del(pi); + + irq = adev->irq[0]; + free_irq(irq, pi); + + iounmap(pi->base); + + res = &adev->res; + release_mem_region(res->start, resource_size(res)); + + kfree(pdmac); + + return 0; +} + +static struct amba_id pl330_ids[] = { + { + .id = 0x00041330, + .mask = 0x000fffff, + }, + { 0, 0 }, +}; + +static struct amba_driver pl330_driver = { + .drv = { + .owner = THIS_MODULE, + .name = "dma-pl330", + }, + .id_table = pl330_ids, + .probe = pl330_probe, + .remove = pl330_remove, +}; + +static int __init pl330_init(void) +{ + return amba_driver_register(&pl330_driver); +} +module_init(pl330_init); + +static void __exit pl330_exit(void) +{ + amba_driver_unregister(&pl330_driver); + return; +} +module_exit(pl330_exit); + +MODULE_AUTHOR("Jaswinder Singh "); +MODULE_DESCRIPTION("API Driver for PL330 DMAC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index adc10a2ac5f6..996c1bdb5a34 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -774,7 +774,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci) static void i5000_check_error(struct mem_ctl_info *mci) { struct i5000_error_info info; - debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i5000_get_error_info(mci, &info); i5000_process_error_info(mci, &info, 1); } @@ -1353,8 +1353,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) int num_dimms_per_channel; int num_csrows; - debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __func__, + debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __FILE__, __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -1389,7 +1389,7 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) return -ENOMEM; kobject_get(&mci->edac_mci_kobj); - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); mci->dev = &pdev->dev; /* record ptr to the generic device */ @@ -1432,8 +1432,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", + __FILE__, __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1478,7 +1478,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("MC: %s: %s()\n", __FILE__, __func__); /* wake up device */ rc = pci_enable_device(pdev); @@ -1497,7 +1497,7 @@ static void 
__devexit i5000_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s: %s()\n", __FILE__, __func__); if (i5000_pci) edac_pci_release_generic_ctl(i5000_pci); @@ -1544,7 +1544,7 @@ static int __init i5000_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1560,7 +1560,7 @@ static int __init i5000_init(void) */ static void __exit i5000_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); pci_unregister_driver(&i5000_driver); } diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index f99d10655ed4..010c1d6526f5 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -694,7 +694,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci) static void i5400_check_error(struct mem_ctl_info *mci) { struct i5400_error_info info; - debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i5400_get_error_info(mci, &info); i5400_process_error_info(mci, &info); } @@ -1227,8 +1227,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (dev_idx >= ARRAY_SIZE(i5400_devs)) return -EINVAL; - debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __func__, + debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __FILE__, __func__, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); @@ -1256,7 +1256,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); mci->dev = &pdev->dev; /* record ptr to the generic device */ @@ -1299,8 +1299,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", + __FILE__, __func__); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1344,7 +1344,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("MC: %s: %s()\n", __FILE__, __func__); /* wake up device */ rc = pci_enable_device(pdev); @@ -1363,7 +1363,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s: %s()\n", __FILE__, __func__); if (i5400_pci) edac_pci_release_generic_ctl(i5400_pci); @@ -1409,7 +1409,7 @@ static int __init i5400_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1425,7 +1425,7 @@ static int __init i5400_init(void) */ static void __exit i5400_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + debugf2("MC: %s: %s()\n", __FILE__, __func__); pci_unregister_driver(&i5400_driver); } diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 2bf2c5051bfe..a2fa1feed724 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c @@ -178,7 +178,7 @@ static void 
i82443bxgx_edacmc_check(struct mem_ctl_info *mci) { struct i82443bxgx_edacmc_error_info info; - debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); i82443bxgx_edacmc_get_error_info(mci, &info); i82443bxgx_edacmc_process_error_info(mci, &info, 1); } @@ -198,13 +198,13 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, for (index = 0; index < mci->nr_csrows; index++) { csrow = &mci->csrows[index]; pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); - debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", - mci->mc_idx, __func__, index, drbar); + debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", + mci->mc_idx, __FILE__, __func__, index, drbar); row_high_limit = ((u32) drbar << 23); /* find the DRAM Chip Select Base address and mask */ - debugf1("MC%d: " __FILE__ ": %s() Row=%d, " - "Boundry Address=%#0x, Last = %#0x \n", - mci->mc_idx, __func__, index, row_high_limit, + debugf1("MC%d: %s: %s() Row=%d, " + "Boundry Address=%#0x, Last = %#0x\n", + mci->mc_idx, __FILE__, __func__, index, row_high_limit, row_high_limit_last); /* 440GX goes to 2GB, represented with a DRB of 0. */ @@ -237,7 +237,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) enum mem_type mtype; enum edac_type edac_mode; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("MC: %s: %s()\n", __FILE__, __func__); /* Something is really hosed if PCI config space reads from * the MC aren't working. @@ -250,7 +250,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; @@ -336,7 +336,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) __func__); } - debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + debugf3("MC: %s: %s(): success\n", __FILE__, __func__); return 0; fail: @@ -352,7 +352,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: " __FILE__ ": %s()\n", __func__); + debugf0("MC: %s: %s()\n", __FILE__, __func__); /* don't need to call pci_enable_device() */ rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); @@ -367,7 +367,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0(__FILE__ ": %s()\n", __func__); + debugf0("%s: %s()\n", __FILE__, __func__); if (i82443bxgx_pci) edac_pci_release_generic_ctl(i82443bxgx_pci); diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 5045156c5313..9dcb30466ec0 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include @@ -63,7 +62,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; #define BIB_CRC(v) ((v) << 0) #define BIB_CRC_LENGTH(v) ((v) << 16) #define BIB_INFO_LENGTH(v) ((v) << 24) - +#define BIB_BUS_NAME 0x31333934 /* "1394" */ #define BIB_LINK_SPEED(v) ((v) << 0) #define BIB_GENERATION(v) ((v) << 4) #define BIB_MAX_ROM(v) ((v) << 8) @@ -73,7 +72,8 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; #define BIB_BMC ((1) << 28) #define BIB_ISC ((1) << 29) #define BIB_CMC ((1) << 30) -#define BIB_IMC ((1) << 31) +#define BIB_IRMC ((1) << 31) +#define NODE_CAPABILITIES 0x0c0083c0 /* 
per IEEE 1394 clause 8.3.2.6.5.2 */ static void generate_config_rom(struct fw_card *card, __be32 *config_rom) { @@ -91,18 +91,18 @@ static void generate_config_rom(struct fw_card *card, __be32 *config_rom) config_rom[0] = cpu_to_be32( BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0)); - config_rom[1] = cpu_to_be32(0x31333934); + config_rom[1] = cpu_to_be32(BIB_BUS_NAME); config_rom[2] = cpu_to_be32( BIB_LINK_SPEED(card->link_speed) | BIB_GENERATION(card->config_rom_generation++ % 14 + 2) | BIB_MAX_ROM(2) | BIB_MAX_RECEIVE(card->max_receive) | - BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC); + BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC); config_rom[3] = cpu_to_be32(card->guid >> 32); config_rom[4] = cpu_to_be32(card->guid); /* Generate root directory. */ - config_rom[6] = cpu_to_be32(0x0c0083c0); /* node capabilities */ + config_rom[6] = cpu_to_be32(NODE_CAPABILITIES); i = 7; j = 7 + descriptor_count; @@ -407,13 +407,6 @@ static void fw_card_bm_work(struct work_struct *work) fw_card_put(card); } -static void flush_timer_callback(unsigned long data) -{ - struct fw_card *card = (struct fw_card *)data; - - fw_flush_transactions(card); -} - void fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, struct device *device) @@ -432,8 +425,6 @@ void fw_card_initialize(struct fw_card *card, init_completion(&card->done); INIT_LIST_HEAD(&card->transaction_list); spin_lock_init(&card->lock); - setup_timer(&card->flush_timer, - flush_timer_callback, (unsigned long)card); card->local_node = NULL; @@ -558,7 +549,6 @@ void fw_core_remove_card(struct fw_card *card) wait_for_completion(&card->done); WARN_ON(!list_empty(&card->transaction_list)); - del_timer_sync(&card->flush_timer); } EXPORT_SYMBOL(fw_core_remove_card); diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 14a34d99eea2..5bf106b9d791 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -227,7 +227,7 @@ static int fw_device_op_open(struct inode *inode, struct file *file) list_add_tail(&client->link, &device->client_list); mutex_unlock(&device->client_list_mutex); - return 0; + return nonseekable_open(inode, file); } static void queue_event(struct client *client, struct event *event, @@ -1496,13 +1496,13 @@ static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) const struct file_operations fw_device_ops = { .owner = THIS_MODULE, + .llseek = no_llseek, .open = fw_device_op_open, .read = fw_device_op_read, .unlocked_ioctl = fw_device_op_ioctl, - .poll = fw_device_op_poll, - .release = fw_device_op_release, .mmap = fw_device_op_mmap, - + .release = fw_device_op_release, + .poll = fw_device_op_poll, #ifdef CONFIG_COMPAT .compat_ioctl = fw_device_op_compat_ioctl, #endif diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 673b03f8b4ec..fdc33ff06dc1 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -81,7 +81,7 @@ static int close_transaction(struct fw_transaction *transaction, spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t == transaction) { - list_del(&t->link); + list_del_init(&t->link); card->tlabel_mask &= ~(1ULL << t->tlabel); break; } @@ -89,6 +89,7 @@ static int close_transaction(struct fw_transaction *transaction, spin_unlock_irqrestore(&card->lock, flags); if (&t->link != &card->transaction_list) { + del_timer_sync(&t->split_timeout_timer); t->callback(card, rcode, NULL, 0, t->callback_data); return 0; } @@ -121,6 +122,31 
@@ int fw_cancel_transaction(struct fw_card *card, } EXPORT_SYMBOL(fw_cancel_transaction); +static void split_transaction_timeout_callback(unsigned long data) +{ + struct fw_transaction *t = (struct fw_transaction *)data; + struct fw_card *card = t->card; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + if (list_empty(&t->link)) { + spin_unlock_irqrestore(&card->lock, flags); + return; + } + list_del(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); + spin_unlock_irqrestore(&card->lock, flags); + + card->driver->cancel_packet(card, &t->packet); + + /* + * At this point cancel_packet will never call the transaction + * callback, since we just took the transaction out of the list. + * So do it here. + */ + t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); +} + static void transmit_complete_callback(struct fw_packet *packet, struct fw_card *card, int status) { @@ -229,6 +255,23 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, packet->payload_mapped = false; } +static int allocate_tlabel(struct fw_card *card) +{ + int tlabel; + + tlabel = card->current_tlabel; + while (card->tlabel_mask & (1ULL << tlabel)) { + tlabel = (tlabel + 1) & 0x3f; + if (tlabel == card->current_tlabel) + return -EBUSY; + } + + card->current_tlabel = (tlabel + 1) & 0x3f; + card->tlabel_mask |= 1ULL << tlabel; + + return tlabel; +} + /** * This function provides low-level access to the IEEE1394 transaction * logic. Most C programs would use either fw_read(), fw_write() or @@ -276,13 +319,6 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, unsigned long flags; int tlabel; - /* - * Bump the flush timer up 100ms first of all so we - * don't race with a flush timer callback. - */ - - mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10)); - /* * Allocate tlabel from the bitmap and put the transaction on * the list while holding the card spinlock. 
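The new allocate_tlabel() treats card->tlabel_mask as a 64-bit bitmap over the 6-bit IEEE 1394 transaction labels: it scans forward from current_tlabel, wrapping with "& 0x3f", and reports -EBUSY only after a full lap, whereas the logic it replaces (removed in the next hunk) tried only the single current label and failed if that one was still busy. The same scan, pulled out into a stand-alone function purely for illustration (not part of the patch, and without the locking and mask bookkeeping the real helper does under card->lock):

        #include <linux/types.h>
        #include <linux/errno.h>

        /* Find a free 6-bit label in a 64-bit busy mask, or -EBUSY. */
        static int pick_free_label(u64 busy, int start)
        {
                int label = start & 0x3f;

                do {
                        if (!(busy & (1ULL << label)))
                                return label;
                        label = (label + 1) & 0x3f;
                } while (label != (start & 0x3f));

                return -EBUSY;  /* all 64 labels are in flight */
        }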
@@ -290,18 +326,20 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, spin_lock_irqsave(&card->lock, flags); - tlabel = card->current_tlabel; - if (card->tlabel_mask & (1ULL << tlabel)) { + tlabel = allocate_tlabel(card); + if (tlabel < 0) { spin_unlock_irqrestore(&card->lock, flags); callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data); return; } - card->current_tlabel = (card->current_tlabel + 1) & 0x3f; - card->tlabel_mask |= (1ULL << tlabel); - t->node_id = destination_id; t->tlabel = tlabel; + t->card = card; + setup_timer(&t->split_timeout_timer, + split_transaction_timeout_callback, (unsigned long)t); + /* FIXME: start this timer later, relative to t->timestamp */ + mod_timer(&t->split_timeout_timer, jiffies + DIV_ROUND_UP(HZ, 10)); t->callback = callback; t->callback_data = callback_data; @@ -347,11 +385,13 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, struct transaction_callback_data d; struct fw_transaction t; + init_timer_on_stack(&t.split_timeout_timer); init_completion(&d.done); d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, offset, payload, length, transaction_callback, &d); wait_for_completion(&d.done); + destroy_timer_on_stack(&t.split_timeout_timer); return d.rcode; } @@ -394,30 +434,6 @@ void fw_send_phy_config(struct fw_card *card, mutex_unlock(&phy_config_mutex); } -void fw_flush_transactions(struct fw_card *card) -{ - struct fw_transaction *t, *next; - struct list_head list; - unsigned long flags; - - INIT_LIST_HEAD(&list); - spin_lock_irqsave(&card->lock, flags); - list_splice_init(&card->transaction_list, &list); - card->tlabel_mask = 0; - spin_unlock_irqrestore(&card->lock, flags); - - list_for_each_entry_safe(t, next, &list, link) { - card->driver->cancel_packet(card, &t->packet); - - /* - * At this point cancel_packet will never call the - * transaction callback, since we just took all the - * transactions out of the list. So do it here. - */ - t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data); - } -} - static struct fw_address_handler *lookup_overlapping_address_handler( struct list_head *list, unsigned long long offset, size_t length) { @@ -827,8 +843,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) spin_lock_irqsave(&card->lock, flags); list_for_each_entry(t, &card->transaction_list, link) { if (t->node_id == source && t->tlabel == tlabel) { - list_del(&t->link); - card->tlabel_mask &= ~(1 << t->tlabel); + list_del_init(&t->link); + card->tlabel_mask &= ~(1ULL << t->tlabel); break; } } @@ -869,6 +885,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) break; } + del_timer_sync(&t->split_timeout_timer); + /* * The response handler may be executed while the request handler * is still pending. Cancel the request handler. 
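One subtlety in fw_run_transaction() above: the struct fw_transaction it uses lives on the stack, so the new split_timeout_timer has to be announced with init_timer_on_stack() before fw_send_request() arms it and released with destroy_timer_on_stack() afterwards; with CONFIG_DEBUG_OBJECTS_TIMERS enabled, initializing a stack-resident timer any other way gets flagged. The general shape of that pattern, shown on a hypothetical helper rather than on the firewire code:

        #include <linux/timer.h>
        #include <linux/completion.h>
        #include <linux/jiffies.h>

        struct wait_ctx {
                struct timer_list timer;
                struct completion done;
        };

        static void wait_ctx_timeout(unsigned long data)
        {
                struct wait_ctx *ctx = (struct wait_ctx *)data;

                complete(&ctx->done);
        }

        static void wait_at_most_one_second(void)
        {
                struct wait_ctx ctx;

                init_timer_on_stack(&ctx.timer);        /* timer object is on the stack */
                init_completion(&ctx.done);
                ctx.timer.function = wait_ctx_timeout;
                ctx.timer.data = (unsigned long)&ctx;
                mod_timer(&ctx.timer, jiffies + HZ);

                wait_for_completion(&ctx.done);

                del_timer_sync(&ctx.timer);
                destroy_timer_on_stack(&ctx.timer);     /* pairs with the _on_stack init */
        }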
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index fb0321300cce..0ecfcd95f4c5 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -27,7 +27,12 @@ struct fw_packet; #define PHY_LINK_ACTIVE 0x80 #define PHY_CONTENDER 0x40 #define PHY_BUS_RESET 0x40 +#define PHY_EXTENDED_REGISTERS 0xe0 #define PHY_BUS_SHORT_RESET 0x40 +#define PHY_INT_STATUS_BITS 0x3c +#define PHY_ENABLE_ACCEL 0x02 +#define PHY_ENABLE_MULTI 0x01 +#define PHY_PAGE_SELECT 0xe0 #define BANDWIDTH_AVAILABLE_INITIAL 4915 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31) @@ -215,7 +220,6 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); void fw_fill_response(struct fw_packet *response, u32 *request_header, int rcode, void *payload, size_t length); -void fw_flush_transactions(struct fw_card *card); void fw_send_phy_config(struct fw_card *card, int node_id, int generation, int gap_count); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index a3b083a7403a..9f627e758cfc 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -236,13 +236,15 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_CYCLE_TIMER 1 #define QUIRK_RESET_PACKET 2 #define QUIRK_BE_HEADERS 4 +#define QUIRK_NO_1394A 8 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { unsigned short vendor, device, flags; } ohci_quirks[] = { {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | - QUIRK_RESET_PACKET}, + QUIRK_RESET_PACKET | + QUIRK_NO_1394A}, {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, @@ -257,15 +259,16 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) + ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) ")"); -#ifdef CONFIG_FIREWIRE_OHCI_DEBUG - #define OHCI_PARAM_DEBUG_AT_AR 1 #define OHCI_PARAM_DEBUG_SELFIDS 2 #define OHCI_PARAM_DEBUG_IRQS 4 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */ +#ifdef CONFIG_FIREWIRE_OHCI_DEBUG + static int param_debug; module_param_named(debug, param_debug, int, 0644); MODULE_PARM_DESC(debug, "Verbose logging (default = 0" @@ -438,9 +441,10 @@ static void log_ar_at_event(char dir, int speed, u32 *header, int evt) #else -#define log_irqs(evt) -#define log_selfids(node_id, generation, self_id_count, sid) -#define log_ar_at_event(dir, speed, header, evt) +#define param_debug 0 +static inline void log_irqs(u32 evt) {} +static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {} +static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {} #endif /* CONFIG_FIREWIRE_OHCI_DEBUG */ @@ -460,27 +464,71 @@ static inline void flush_writes(const struct fw_ohci *ohci) reg_read(ohci, OHCI1394_Version); } -static int ohci_update_phy_reg(struct fw_card *card, int addr, - int clear_bits, int set_bits) +static int read_phy_reg(struct fw_ohci *ohci, int addr) { - struct fw_ohci *ohci = fw_ohci(card); - u32 val, old; + u32 val; + int i; reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr)); - flush_writes(ohci); - msleep(2); - val = reg_read(ohci, OHCI1394_PhyControl); - if ((val & OHCI1394_PhyControl_ReadDone) == 0) { - 
fw_error("failed to set phy reg bits.\n"); - return -EBUSY; + for (i = 0; i < 10; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (val & OHCI1394_PhyControl_ReadDone) + return OHCI1394_PhyControl_ReadData(val); + + msleep(1); } + fw_error("failed to read phy reg\n"); + + return -EBUSY; +} + +static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) +{ + int i; - old = OHCI1394_PhyControl_ReadData(val); - old = (old & ~clear_bits) | set_bits; reg_write(ohci, OHCI1394_PhyControl, - OHCI1394_PhyControl_Write(addr, old)); + OHCI1394_PhyControl_Write(addr, val)); + for (i = 0; i < 100; i++) { + val = reg_read(ohci, OHCI1394_PhyControl); + if (!(val & OHCI1394_PhyControl_WritePending)) + return 0; - return 0; + msleep(1); + } + fw_error("failed to write phy reg\n"); + + return -EBUSY; +} + +static int ohci_update_phy_reg(struct fw_card *card, int addr, + int clear_bits, int set_bits) +{ + struct fw_ohci *ohci = fw_ohci(card); + int ret; + + ret = read_phy_reg(ohci, addr); + if (ret < 0) + return ret; + + /* + * The interrupt status bits are cleared by writing a one bit. + * Avoid clearing them unless explicitly requested in set_bits. + */ + if (addr == 5) + clear_bits |= PHY_INT_STATUS_BITS; + + return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits); +} + +static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr) +{ + int ret; + + ret = ohci_update_phy_reg(&ohci->card, 7, PHY_PAGE_SELECT, page << 5); + if (ret < 0) + return ret; + + return read_phy_reg(ohci, addr); } static int ar_context_add_page(struct ar_context *ctx) @@ -1495,13 +1543,64 @@ static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) memset(&dest[length], 0, CONFIG_ROM_SIZE - size); } +static int configure_1394a_enhancements(struct fw_ohci *ohci) +{ + bool enable_1394a; + int ret, clear, set, offset; + + /* Check if the driver should configure link and PHY. */ + if (!(reg_read(ohci, OHCI1394_HCControlSet) & + OHCI1394_HCControl_programPhyEnable)) + return 0; + + /* Paranoia: check whether the PHY supports 1394a, too. */ + enable_1394a = false; + ret = read_phy_reg(ohci, 2); + if (ret < 0) + return ret; + if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { + ret = read_paged_phy_reg(ohci, 1, 8); + if (ret < 0) + return ret; + if (ret >= 1) + enable_1394a = true; + } + + if (ohci->quirks & QUIRK_NO_1394A) + enable_1394a = false; + + /* Configure PHY and link consistently. */ + if (enable_1394a) { + clear = 0; + set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + } else { + clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; + set = 0; + } + ret = ohci_update_phy_reg(&ohci->card, 5, clear, set); + if (ret < 0) + return ret; + + if (enable_1394a) + offset = OHCI1394_HCControlSet; + else + offset = OHCI1394_HCControlClear; + reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); + + /* Clean up: configuration has been taken care of. 
*/ + reg_write(ohci, OHCI1394_HCControlClear, + OHCI1394_HCControl_programPhyEnable); + + return 0; +} + static int ohci_enable(struct fw_card *card, const __be32 *config_rom, size_t length) { struct fw_ohci *ohci = fw_ohci(card); struct pci_dev *dev = to_pci_dev(card->device); u32 lps; - int i; + int i, ret; if (software_reset(ohci)) { fw_error("Failed to reset ohci card.\n"); @@ -1565,10 +1664,14 @@ static int ohci_enable(struct fw_card *card, if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); + ret = configure_1394a_enhancements(ohci); + if (ret < 0) + return ret; + /* Activate link_on bit and contender bit in our self ID packets.*/ - if (ohci_update_phy_reg(card, 4, 0, - PHY_LINK_ACTIVE | PHY_CONTENDER) < 0) - return -EIO; + ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); + if (ret < 0) + return ret; /* * When the link is not yet enabled, the atomic config rom @@ -2304,7 +2407,7 @@ static const struct fw_card_driver ohci_driver = { }; #ifdef CONFIG_PPC_PMAC -static void ohci_pmac_on(struct pci_dev *dev) +static void pmac_ohci_on(struct pci_dev *dev) { if (machine_is(powermac)) { struct device_node *ofn = pci_device_to_OF_node(dev); @@ -2316,7 +2419,7 @@ static void ohci_pmac_on(struct pci_dev *dev) } } -static void ohci_pmac_off(struct pci_dev *dev) +static void pmac_ohci_off(struct pci_dev *dev) { if (machine_is(powermac)) { struct device_node *ofn = pci_device_to_OF_node(dev); @@ -2328,15 +2431,15 @@ static void ohci_pmac_off(struct pci_dev *dev) } } #else -#define ohci_pmac_on(dev) -#define ohci_pmac_off(dev) +static inline void pmac_ohci_on(struct pci_dev *dev) {} +static inline void pmac_ohci_off(struct pci_dev *dev) {} #endif /* CONFIG_PPC_PMAC */ static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; - u32 bus_options, max_receive, link_speed, version; + u32 bus_options, max_receive, link_speed, version, link_enh; u64 guid; int i, err, n_ir, n_it; size_t size; @@ -2349,7 +2452,7 @@ static int __devinit pci_probe(struct pci_dev *dev, fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); - ohci_pmac_on(dev); + pmac_ohci_on(dev); err = pci_enable_device(dev); if (err) { @@ -2389,6 +2492,23 @@ static int __devinit pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; + /* TI OHCI-Lynx and compatible: set recommended configuration bits. 
*/ + if (dev->vendor == PCI_VENDOR_ID_TI) { + pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh); + + /* adjust latency of ATx FIFO: use 1.7 KB threshold */ + link_enh &= ~TI_LinkEnh_atx_thresh_mask; + link_enh |= TI_LinkEnh_atx_thresh_1_7K; + + /* use priority arbitration for asynchronous responses */ + link_enh |= TI_LinkEnh_enab_unfair; + + /* required for aPhyEnhanceEnable to work */ + link_enh |= TI_LinkEnh_enab_accel; + + pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh); + } + ar_context_init(&ohci->ar_request_ctx, ohci, OHCI1394_AsReqRcvContextControlSet); @@ -2466,7 +2586,7 @@ static int __devinit pci_probe(struct pci_dev *dev, pci_disable_device(dev); fail_free: kfree(&ohci->card); - ohci_pmac_off(dev); + pmac_ohci_off(dev); fail: if (err == -ENOMEM) fw_error("Out of memory\n"); @@ -2509,7 +2629,7 @@ static void pci_remove(struct pci_dev *dev) pci_release_region(dev, 0); pci_disable_device(dev); kfree(&ohci->card); - ohci_pmac_off(dev); + pmac_ohci_off(dev); fw_notify("Removed fw-ohci device.\n"); } @@ -2530,7 +2650,7 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) err = pci_set_power_state(dev, pci_choose_state(dev, state)); if (err) fw_error("pci_set_power_state failed with %d\n", err); - ohci_pmac_off(dev); + pmac_ohci_off(dev); return 0; } @@ -2540,7 +2660,7 @@ static int pci_resume(struct pci_dev *dev) struct fw_ohci *ohci = pci_get_drvdata(dev); int err; - ohci_pmac_on(dev); + pmac_ohci_on(dev); pci_set_power_state(dev, PCI_D0); pci_restore_state(dev); err = pci_enable_device(dev); diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index ba492d85c516..3bc9a5d744eb 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h @@ -67,7 +67,7 @@ #define OHCI1394_PhyControl_ReadDone 0x80000000 #define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16) #define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000) -#define OHCI1394_PhyControl_WriteDone 0x00004000 +#define OHCI1394_PhyControl_WritePending 0x00004000 #define OHCI1394_IsochronousCycleTimer 0x0F0 #define OHCI1394_AsReqFilterHiSet 0x100 #define OHCI1394_AsReqFilterHiClear 0x104 @@ -154,4 +154,12 @@ #define OHCI1394_phy_tcode 0xe +/* TI extensions */ + +#define PCI_CFG_TI_LinkEnh 0xf4 +#define TI_LinkEnh_enab_accel 0x00000002 +#define TI_LinkEnh_enab_unfair 0x00000080 +#define TI_LinkEnh_atx_thresh_mask 0x00003000 +#define TI_LinkEnh_atx_thresh_1_7K 0x00001000 + #endif /* _FIREWIRE_OHCI_H */ diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index fee678f74a19..724038dab4ca 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -139,6 +139,13 @@ config GPIO_MAX732X Board setup code must specify the model to use, and the start number for these GPIOs. +config GPIO_MAX732X_IRQ + bool "Interrupt controller support for MAX732x" + depends on GPIO_MAX732X=y && GENERIC_HARDIRQS + help + Say yes here to enable the max732x to be used as an interrupt + controller. It requires the driver to be built in the kernel. + config GPIO_PCA953X tristate "PCA953x, PCA955x, TCA64xx, and MAX7310 I/O ports" depends on I2C @@ -188,6 +195,13 @@ config GPIO_PCF857X This driver provides an in-kernel interface to those GPIOs using platform-neutral GPIO calls. +config GPIO_TC35892 + bool "TC35892 GPIOs" + depends on MFD_TC35892 + help + This enables support for the GPIOs found on the TC35892 + I/O Expander. 
+ config GPIO_TWL4030 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" depends on TWL4030_CORE @@ -264,10 +278,10 @@ config GPIO_BT8XX If unsure, say N. config GPIO_LANGWELL - bool "Intel Moorestown Platform Langwell GPIO support" + bool "Intel Langwell/Penwell GPIO support" depends on PCI help - Say Y here to support Intel Moorestown platform GPIO. + Say Y here to support Intel Langwell/Penwell GPIO. config GPIO_TIMBERDALE bool "Support for timberdale GPIO IP" @@ -275,6 +289,15 @@ config GPIO_TIMBERDALE ---help--- Add support for the GPIO IP in the timberdale FPGA. +config GPIO_RDC321X + tristate "RDC R-321x GPIO support" + depends on PCI && GPIOLIB + select MFD_CORE + select MFD_RDC321X + help + Support for the RDC R321x SoC GPIOs over southbridge + PCI configuration space. + comment "SPI GPIO expanders:" config GPIO_MAX7301 @@ -310,4 +333,14 @@ config GPIO_UCB1400 To compile this driver as a module, choose M here: the module will be called ucb1400_gpio. +comment "MODULbus GPIO expanders:" + +config GPIO_JANZ_TTL + tristate "Janz VMOD-TTL Digital IO Module" + depends on MFD_JANZ_CMODIO + help + This enables support for the Janz VMOD-TTL Digital IO module. + This driver provides support for driving the pins in output + mode only. Input mode is not supported. + endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 10f3f8d958b1..51c3cdd41b5a 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o obj-$(CONFIG_GPIO_PCA953X) += pca953x.o obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o obj-$(CONFIG_GPIO_PL061) += pl061.o +obj-$(CONFIG_GPIO_TC35892) += tc35892-gpio.o obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o @@ -27,4 +28,6 @@ obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o -obj-$(CONFIG_GPIO_SCH) += sch_gpio.o \ No newline at end of file +obj-$(CONFIG_GPIO_SCH) += sch_gpio.o +obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o +obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index 0c3c498f2260..f73a1555e49d 100644 --- a/drivers/gpio/cs5535-gpio.c +++ b/drivers/gpio/cs5535-gpio.c @@ -197,7 +197,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val) return 0; } -static char *cs5535_gpio_names[] = { +static const char * const cs5535_gpio_names[] = { "GPIO0", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5", "GPIO6", "GPIO7", "GPIO8", "GPIO9", "GPIO10", "GPIO11", diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index cae1b8c5b08c..3ca36542e338 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -722,7 +722,7 @@ int gpio_export(unsigned gpio, bool direction_may_change) unsigned long flags; struct gpio_desc *desc; int status = -EINVAL; - char *ioname = NULL; + const char *ioname = NULL; /* can't export until sysfs is available ... */ if (!gpio_class.p) { @@ -753,7 +753,7 @@ int gpio_export(unsigned gpio, bool direction_may_change) struct device *dev; dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, ioname ? ioname : "gpio%d", gpio); + desc, ioname ? ioname : "gpio%u", gpio); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpio_attr_group); @@ -1106,7 +1106,7 @@ unlock: fail: /* failures here can mean systems won't boot... 
*/ if (status) - pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n", + pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n", chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return status; @@ -1447,6 +1447,49 @@ fail: } EXPORT_SYMBOL_GPL(gpio_direction_output); +/** + * gpio_set_debounce - sets @debounce time for a @gpio + * @gpio: the gpio to set debounce time + * @debounce: debounce time in microseconds + */ +int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + unsigned long flags; + struct gpio_chip *chip; + struct gpio_desc *desc = &gpio_desc[gpio]; + int status = -EINVAL; + + spin_lock_irqsave(&gpio_lock, flags); + + if (!gpio_is_valid(gpio)) + goto fail; + chip = desc->chip; + if (!chip || !chip->set || !chip->set_debounce) + goto fail; + gpio -= chip->base; + if (gpio >= chip->ngpio) + goto fail; + status = gpio_ensure_requested(desc, gpio); + if (status < 0) + goto fail; + + /* now we know the gpio is valid and chip won't vanish */ + + spin_unlock_irqrestore(&gpio_lock, flags); + + might_sleep_if(extra_checks && chip->can_sleep); + + return chip->set_debounce(chip, gpio, debounce); + +fail: + spin_unlock_irqrestore(&gpio_lock, flags); + if (status) + pr_debug("%s: gpio-%d status %d\n", + __func__, gpio, status); + + return status; +} +EXPORT_SYMBOL_GPL(gpio_set_debounce); /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c index 41a9388f2fde..48fc43c4bdd1 100644 --- a/drivers/gpio/it8761e_gpio.c +++ b/drivers/gpio/it8761e_gpio.c @@ -217,7 +217,10 @@ gpiochip_add_err: static void __exit it8761e_gpio_exit(void) { if (gpio_ba) { - gpiochip_remove(&it8761e_gpio_chip); + int ret = gpiochip_remove(&it8761e_gpio_chip); + + WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n", + __func__, ret); release_region(gpio_ba, GPIO_IOSIZE); gpio_ba = 0; diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c new file mode 100644 index 000000000000..813ac077e5d7 --- /dev/null +++ b/drivers/gpio/janz-ttl.c @@ -0,0 +1,258 @@ +/* + * Janz MODULbus VMOD-TTL GPIO Driver + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_NAME "janz-ttl" + +#define PORTA_DIRECTION 0x23 +#define PORTB_DIRECTION 0x2B +#define PORTC_DIRECTION 0x06 +#define PORTA_IOCTL 0x24 +#define PORTB_IOCTL 0x2C +#define PORTC_IOCTL 0x07 + +#define MASTER_INT_CTL 0x00 +#define MASTER_CONF_CTL 0x01 + +#define CONF_PAE (1 << 2) +#define CONF_PBE (1 << 7) +#define CONF_PCE (1 << 4) + +struct ttl_control_regs { + __be16 portc; + __be16 portb; + __be16 porta; + __be16 control; +}; + +struct ttl_module { + struct gpio_chip gpio; + + /* base address of registers */ + struct ttl_control_regs __iomem *regs; + + u8 portc_shadow; + u8 portb_shadow; + u8 porta_shadow; + + spinlock_t lock; +}; + +static int ttl_get_value(struct gpio_chip *gpio, unsigned offset) +{ + struct ttl_module *mod = dev_get_drvdata(gpio->dev); + u8 *shadow; + int ret; + + if (offset < 8) { + shadow = &mod->porta_shadow; + } else if (offset < 16) { + shadow = &mod->portb_shadow; + offset -= 8; + } else { + shadow = &mod->portc_shadow; + offset -= 16; + } + + spin_lock(&mod->lock); + ret = *shadow & (1 << offset); + spin_unlock(&mod->lock); + return ret; +} + +static void ttl_set_value(struct gpio_chip *gpio, unsigned offset, int value) +{ + struct ttl_module *mod = dev_get_drvdata(gpio->dev); + void __iomem *port; + u8 *shadow; + + if (offset < 8) { + port = &mod->regs->porta; + shadow = &mod->porta_shadow; + } else if (offset < 16) { + port = &mod->regs->portb; + shadow = &mod->portb_shadow; + offset -= 8; + } else { + port = &mod->regs->portc; + shadow = &mod->portc_shadow; + offset -= 16; + } + + spin_lock(&mod->lock); + if (value) + *shadow |= (1 << offset); + else + *shadow &= ~(1 << offset); + + iowrite16be(*shadow, port); + spin_unlock(&mod->lock); +} + +static void __devinit ttl_write_reg(struct ttl_module *mod, u8 reg, u16 val) +{ + iowrite16be(reg, &mod->regs->control); + iowrite16be(val, &mod->regs->control); +} + +static void __devinit ttl_setup_device(struct ttl_module *mod) +{ + /* reset the device to a known state */ + iowrite16be(0x0000, &mod->regs->control); + iowrite16be(0x0001, &mod->regs->control); + iowrite16be(0x0000, &mod->regs->control); + + /* put all ports in open-drain mode */ + ttl_write_reg(mod, PORTA_IOCTL, 0x00ff); + ttl_write_reg(mod, PORTB_IOCTL, 0x00ff); + ttl_write_reg(mod, PORTC_IOCTL, 0x000f); + + /* set all ports as outputs */ + ttl_write_reg(mod, PORTA_DIRECTION, 0x0000); + ttl_write_reg(mod, PORTB_DIRECTION, 0x0000); + ttl_write_reg(mod, PORTC_DIRECTION, 0x0000); + + /* set all ports to drive zeroes */ + iowrite16be(0x0000, &mod->regs->porta); + iowrite16be(0x0000, &mod->regs->portb); + iowrite16be(0x0000, &mod->regs->portc); + + /* enable all ports */ + ttl_write_reg(mod, MASTER_CONF_CTL, CONF_PAE | CONF_PBE | CONF_PCE); +} + +static int __devinit ttl_probe(struct platform_device *pdev) +{ + struct janz_platform_data *pdata; + struct device *dev = &pdev->dev; + struct ttl_module *mod; + struct gpio_chip *gpio; + struct resource *res; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(dev, "no platform data\n"); + ret = -ENXIO; + goto out_return; + } + + mod = kzalloc(sizeof(*mod), GFP_KERNEL); + if (!mod) { + dev_err(dev, "unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + platform_set_drvdata(pdev, mod); + spin_lock_init(&mod->lock); + + /* get access to the MODULbus registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { 
+ dev_err(dev, "MODULbus registers not found\n"); + ret = -ENODEV; + goto out_free_mod; + } + + mod->regs = ioremap(res->start, resource_size(res)); + if (!mod->regs) { + dev_err(dev, "MODULbus registers not ioremap\n"); + ret = -ENOMEM; + goto out_free_mod; + } + + ttl_setup_device(mod); + + /* Initialize the GPIO data structures */ + gpio = &mod->gpio; + gpio->dev = &pdev->dev; + gpio->label = pdev->name; + gpio->get = ttl_get_value; + gpio->set = ttl_set_value; + gpio->owner = THIS_MODULE; + + /* request dynamic allocation */ + gpio->base = -1; + gpio->ngpio = 20; + + ret = gpiochip_add(gpio); + if (ret) { + dev_err(dev, "unable to add GPIO chip\n"); + goto out_iounmap_regs; + } + + dev_info(&pdev->dev, "module %d: registered GPIO device\n", + pdata->modno); + return 0; + +out_iounmap_regs: + iounmap(mod->regs); +out_free_mod: + kfree(mod); +out_return: + return ret; +} + +static int __devexit ttl_remove(struct platform_device *pdev) +{ + struct ttl_module *mod = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; + + ret = gpiochip_remove(&mod->gpio); + if (ret) { + dev_err(dev, "unable to remove GPIO chip\n"); + return ret; + } + + iounmap(mod->regs); + kfree(mod); + return 0; +} + +static struct platform_driver ttl_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ttl_probe, + .remove = __devexit_p(ttl_remove), +}; + +static int __init ttl_init(void) +{ + return platform_driver_register(&ttl_driver); +} + +static void __exit ttl_exit(void) +{ + platform_driver_unregister(&ttl_driver); +} + +MODULE_AUTHOR("Ira W. Snyder "); +MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:janz-ttl"); + +module_init(ttl_init); +module_exit(ttl_exit); diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c index 00c3a14127af..8383a8d7f994 100644 --- a/drivers/gpio/langwell_gpio.c +++ b/drivers/gpio/langwell_gpio.c @@ -17,6 +17,7 @@ /* Supports: * Moorestown platform Langwell chip. + * Medfield platform Penwell chip. 
*/ #include @@ -31,44 +32,65 @@ #include #include -struct lnw_gpio_register { - u32 GPLR[2]; - u32 GPDR[2]; - u32 GPSR[2]; - u32 GPCR[2]; - u32 GRER[2]; - u32 GFER[2]; - u32 GEDR[2]; +/* + * The Langwell chip has 64 pins, so two 32-bit registers control each + * feature, while the Penwell chip has 96 pins per block and needs three + * 32-bit registers; we therefore only define the register order here instead + * of a structure, and compute the bit offset for a pin (GPDR as an example): + * + * nreg = ngpio / 32; + * reg = offset / 32; + * bit = offset % 32; + * reg_addr = reg_base + GPDR * nreg * 4 + reg * 4; + * + * so that bit of reg_addr controls the GPDR feature of the pin at offset +*/ + +enum GPIO_REG { + GPLR = 0, /* pin level read-only */ + GPDR, /* pin direction */ + GPSR, /* pin set */ + GPCR, /* pin clear */ + GRER, /* rising edge detect */ + GFER, /* falling edge detect */ + GEDR, /* edge detect result */ }; struct lnw_gpio { struct gpio_chip chip; - struct lnw_gpio_register *reg_base; + void *reg_base; spinlock_t lock; unsigned irq_base; }; -static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) +static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset, + enum GPIO_REG reg_type) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); + unsigned nreg = chip->ngpio / 32; u8 reg = offset / 32; - void __iomem *gplr; + void __iomem *ptr; + + ptr = (void __iomem *)(lnw->reg_base + reg_type * nreg * 4 + reg * 4); + return ptr; +} + +static int lnw_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + void __iomem *gplr = gpio_reg(chip, offset, GPLR); - gplr = (void __iomem *)(&lnw->reg_base->GPLR[reg]); return readl(gplr) & BIT(offset % 32); } static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { - struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; void __iomem *gpsr, *gpcr; if (value) { - gpsr = (void __iomem *)(&lnw->reg_base->GPSR[reg]); + gpsr = gpio_reg(chip, offset, GPSR); writel(BIT(offset % 32), gpsr); } else { - gpcr = (void __iomem *)(&lnw->reg_base->GPCR[reg]); + gpcr = gpio_reg(chip, offset, GPCR); writel(BIT(offset % 32), gpcr); } } @@ -76,12 +98,10 @@ static void lnw_gpio_set(struct gpio_chip *chip, unsigned offset, int value) static int lnw_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); u32 value; unsigned long flags; - void __iomem *gpdr; - gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]); spin_lock_irqsave(&lnw->lock, flags); value = readl(gpdr); value &= ~BIT(offset % 32); @@ -94,12 +114,10 @@ static int lnw_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct lnw_gpio *lnw = container_of(chip, struct lnw_gpio, chip); - u8 reg = offset / 32; + void __iomem *gpdr = gpio_reg(chip, offset, GPDR); unsigned long flags; - void __iomem *gpdr; lnw_gpio_set(chip, offset, value); - gpdr = (void __iomem *)(&lnw->reg_base->GPDR[reg]); spin_lock_irqsave(&lnw->lock, flags); value = readl(gpdr); value |= BIT(offset % 32); @@ -118,11 +136,10 @@ static int lnw_irq_type(unsigned irq, unsigned type) { struct lnw_gpio *lnw = get_irq_chip_data(irq); u32 gpio = irq - lnw->irq_base; - u8 reg = gpio / 32; unsigned long flags; u32 value; - void __iomem *grer = (void __iomem *)(&lnw->reg_base->GRER[reg]); - void __iomem *gfer = (void __iomem *)(&lnw->reg_base->GFER[reg]); + void __iomem *grer = 
gpio_reg(&lnw->chip, gpio, GRER); + void __iomem *gfer = gpio_reg(&lnw->chip, gpio, GFER); if (gpio >= lnw->chip.ngpio) return -EINVAL; @@ -158,8 +175,10 @@ static struct irq_chip lnw_irqchip = { .set_type = lnw_irq_type, }; -static struct pci_device_id lnw_gpio_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f) }, +static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = { /* pin number */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080f), .driver_data = 64 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081f), .driver_data = 96 }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x081a), .driver_data = 96 }, { 0, } }; MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); @@ -167,17 +186,17 @@ MODULE_DEVICE_TABLE(pci, lnw_gpio_ids); static void lnw_irq_handler(unsigned irq, struct irq_desc *desc) { struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq); - u32 reg, gpio; + u32 base, gpio; void __iomem *gedr; u32 gedr_v; /* check GPIO controller to check which pin triggered the interrupt */ - for (reg = 0; reg < lnw->chip.ngpio / 32; reg++) { - gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]); + for (base = 0; base < lnw->chip.ngpio; base += 32) { + gedr = gpio_reg(&lnw->chip, base, GEDR); gedr_v = readl(gedr); if (!gedr_v) continue; - for (gpio = reg*32; gpio < reg*32+32; gpio++) + for (gpio = base; gpio < base + 32; gpio++) if (gedr_v & BIT(gpio % 32)) { pr_debug("pin %d triggered\n", gpio); generic_handle_irq(lnw->irq_base + gpio); @@ -245,7 +264,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->chip.set = lnw_gpio_set; lnw->chip.to_irq = lnw_gpio_to_irq; lnw->chip.base = gpio_base; - lnw->chip.ngpio = 64; + lnw->chip.ngpio = id->driver_data; lnw->chip.can_sleep = 0; pci_set_drvdata(pdev, lnw); retval = gpiochip_add(&lnw->chip); diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c index f7868243af89..9cad60f9e962 100644 --- a/drivers/gpio/max732x.c +++ b/drivers/gpio/max732x.c @@ -17,7 +17,8 @@ #include #include #include - +#include +#include #include #include @@ -31,7 +32,8 @@ * - Open Drain I/O * * designated by 'O', 'I' and 'P' individually according to MAXIM's - * datasheets. + * datasheets. 'I' and 'P' ports are interrupt capable, some with + * a dedicated interrupt mask. * * There are two groups of I/O ports, each group usually includes * up to 8 I/O ports, and is accessed by a specific I2C address: @@ -44,7 +46,8 @@ * * Within each group of ports, there are five known combinations of * I/O ports: 4I4O, 4P4O, 8I, 8P, 8O, see the definitions below for - * the detailed organization of these ports. + * the detailed organization of these ports. Only Group A is interrupt + * capable. 
* * GPIO numbers start from 'gpio_base + 0' to 'gpio_base + 8/16', * and GPIOs from GROUP_A are numbered before those from GROUP_B @@ -68,16 +71,47 @@ #define GROUP_A(x) ((x) & 0xffff) /* I2C Addr: 0b'110xxxx */ #define GROUP_B(x) ((x) << 16) /* I2C Addr: 0b'101xxxx */ +#define INT_NONE 0x0 /* No interrupt capability */ +#define INT_NO_MASK 0x1 /* Has interrupts, no mask */ +#define INT_INDEP_MASK 0x2 /* Has interrupts, independent mask */ +#define INT_MERGED_MASK 0x3 /* Has interrupts, merged mask */ + +#define INT_CAPS(x) (((uint64_t)(x)) << 32) + +enum { + MAX7319, + MAX7320, + MAX7321, + MAX7322, + MAX7323, + MAX7324, + MAX7325, + MAX7326, + MAX7327, +}; + +static uint64_t max732x_features[] = { + [MAX7319] = GROUP_A(IO_8I) | INT_CAPS(INT_MERGED_MASK), + [MAX7320] = GROUP_B(IO_8O), + [MAX7321] = GROUP_A(IO_8P) | INT_CAPS(INT_NO_MASK), + [MAX7322] = GROUP_A(IO_4I4O) | INT_CAPS(INT_MERGED_MASK), + [MAX7323] = GROUP_A(IO_4P4O) | INT_CAPS(INT_INDEP_MASK), + [MAX7324] = GROUP_A(IO_8I) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK), + [MAX7325] = GROUP_A(IO_8P) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK), + [MAX7326] = GROUP_A(IO_4I4O) | GROUP_B(IO_8O) | INT_CAPS(INT_MERGED_MASK), + [MAX7327] = GROUP_A(IO_4P4O) | GROUP_B(IO_8O) | INT_CAPS(INT_NO_MASK), +}; + static const struct i2c_device_id max732x_id[] = { - { "max7319", GROUP_A(IO_8I) }, - { "max7320", GROUP_B(IO_8O) }, - { "max7321", GROUP_A(IO_8P) }, - { "max7322", GROUP_A(IO_4I4O) }, - { "max7323", GROUP_A(IO_4P4O) }, - { "max7324", GROUP_A(IO_8I) | GROUP_B(IO_8O) }, - { "max7325", GROUP_A(IO_8P) | GROUP_B(IO_8O) }, - { "max7326", GROUP_A(IO_4I4O) | GROUP_B(IO_8O) }, - { "max7327", GROUP_A(IO_4P4O) | GROUP_B(IO_8O) }, + { "max7319", MAX7319 }, + { "max7320", MAX7320 }, + { "max7321", MAX7321 }, + { "max7322", MAX7322 }, + { "max7323", MAX7323 }, + { "max7324", MAX7324 }, + { "max7325", MAX7325 }, + { "max7326", MAX7326 }, + { "max7327", MAX7327 }, { }, }; MODULE_DEVICE_TABLE(i2c, max732x_id); @@ -96,9 +130,19 @@ struct max732x_chip { struct mutex lock; uint8_t reg_out[2]; + +#ifdef CONFIG_GPIO_MAX732X_IRQ + struct mutex irq_lock; + int irq_base; + uint8_t irq_mask; + uint8_t irq_mask_cur; + uint8_t irq_trig_raise; + uint8_t irq_trig_fall; + uint8_t irq_features; +#endif }; -static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) +static int max732x_writeb(struct max732x_chip *chip, int group_a, uint8_t val) { struct i2c_client *client; int ret; @@ -113,7 +157,7 @@ static int max732x_write(struct max732x_chip *chip, int group_a, uint8_t val) return 0; } -static int max732x_read(struct max732x_chip *chip, int group_a, uint8_t *val) +static int max732x_readb(struct max732x_chip *chip, int group_a, uint8_t *val) { struct i2c_client *client; int ret; @@ -142,7 +186,7 @@ static int max732x_gpio_get_value(struct gpio_chip *gc, unsigned off) chip = container_of(gc, struct max732x_chip, gpio_chip); - ret = max732x_read(chip, is_group_a(chip, off), ®_val); + ret = max732x_readb(chip, is_group_a(chip, off), ®_val); if (ret < 0) return 0; @@ -162,7 +206,7 @@ static void max732x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) reg_out = (off > 7) ? chip->reg_out[1] : chip->reg_out[0]; reg_out = (val) ? 
reg_out | mask : reg_out & ~mask; - ret = max732x_write(chip, is_group_a(chip, off), reg_out); + ret = max732x_writeb(chip, is_group_a(chip, off), reg_out); if (ret < 0) goto out; @@ -188,6 +232,13 @@ static int max732x_gpio_direction_input(struct gpio_chip *gc, unsigned off) return -EACCES; } + /* + * Open-drain pins must be set to high impedance (which is + * equivalent to output-high) to be turned into an input. + */ + if ((mask & chip->dir_output)) + max732x_gpio_set_value(gc, off, 1); + return 0; } @@ -209,12 +260,278 @@ static int max732x_gpio_direction_output(struct gpio_chip *gc, return 0; } +#ifdef CONFIG_GPIO_MAX732X_IRQ +static int max732x_writew(struct max732x_chip *chip, uint16_t val) +{ + int ret; + + val = cpu_to_le16(val); + + ret = i2c_master_send(chip->client_group_a, (char *)&val, 2); + if (ret < 0) { + dev_err(&chip->client_group_a->dev, "failed writing\n"); + return ret; + } + + return 0; +} + +static int max732x_readw(struct max732x_chip *chip, uint16_t *val) +{ + int ret; + + ret = i2c_master_recv(chip->client_group_a, (char *)val, 2); + if (ret < 0) { + dev_err(&chip->client_group_a->dev, "failed reading\n"); + return ret; + } + + *val = le16_to_cpu(*val); + return 0; +} + +static void max732x_irq_update_mask(struct max732x_chip *chip) +{ + uint16_t msg; + + if (chip->irq_mask == chip->irq_mask_cur) + return; + + chip->irq_mask = chip->irq_mask_cur; + + if (chip->irq_features == INT_NO_MASK) + return; + + mutex_lock(&chip->lock); + + switch (chip->irq_features) { + case INT_INDEP_MASK: + msg = (chip->irq_mask << 8) | chip->reg_out[0]; + max732x_writew(chip, msg); + break; + + case INT_MERGED_MASK: + msg = chip->irq_mask | chip->reg_out[0]; + max732x_writeb(chip, 1, (uint8_t)msg); + break; + } + + mutex_unlock(&chip->lock); +} + +static int max732x_gpio_to_irq(struct gpio_chip *gc, unsigned off) +{ + struct max732x_chip *chip; + + chip = container_of(gc, struct max732x_chip, gpio_chip); + return chip->irq_base + off; +} + +static void max732x_irq_mask(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base)); +} + +static void max732x_irq_unmask(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + chip->irq_mask_cur |= 1 << (irq - chip->irq_base); +} + +static void max732x_irq_bus_lock(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + mutex_lock(&chip->irq_lock); + chip->irq_mask_cur = chip->irq_mask; +} + +static void max732x_irq_bus_sync_unlock(unsigned int irq) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + + max732x_irq_update_mask(chip); + mutex_unlock(&chip->irq_lock); +} + +static int max732x_irq_set_type(unsigned int irq, unsigned int type) +{ + struct max732x_chip *chip = get_irq_chip_data(irq); + uint16_t off = irq - chip->irq_base; + uint16_t mask = 1 << off; + + if (!(mask & chip->dir_input)) { + dev_dbg(&chip->client->dev, "%s port %d is output only\n", + chip->client->name, off); + return -EACCES; + } + + if (!(type & IRQ_TYPE_EDGE_BOTH)) { + dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", + irq, type); + return -EINVAL; + } + + if (type & IRQ_TYPE_EDGE_FALLING) + chip->irq_trig_fall |= mask; + else + chip->irq_trig_fall &= ~mask; + + if (type & IRQ_TYPE_EDGE_RISING) + chip->irq_trig_raise |= mask; + else + chip->irq_trig_raise &= ~mask; + + return max732x_gpio_direction_input(&chip->gpio_chip, off); +} + +static struct irq_chip max732x_irq_chip = { + .name = "max732x", + .mask = max732x_irq_mask, + 
.unmask = max732x_irq_unmask, + .bus_lock = max732x_irq_bus_lock, + .bus_sync_unlock = max732x_irq_bus_sync_unlock, + .set_type = max732x_irq_set_type, +}; + +static uint8_t max732x_irq_pending(struct max732x_chip *chip) +{ + uint8_t cur_stat; + uint8_t old_stat; + uint8_t trigger; + uint8_t pending; + uint16_t status; + int ret; + + ret = max732x_readw(chip, &status); + if (ret) + return 0; + + trigger = status >> 8; + trigger &= chip->irq_mask; + + if (!trigger) + return 0; + + cur_stat = status & 0xFF; + cur_stat &= chip->irq_mask; + + old_stat = cur_stat ^ trigger; + + pending = (old_stat & chip->irq_trig_fall) | + (cur_stat & chip->irq_trig_raise); + pending &= trigger; + + return pending; +} + +static irqreturn_t max732x_irq_handler(int irq, void *devid) +{ + struct max732x_chip *chip = devid; + uint8_t pending; + uint8_t level; + + pending = max732x_irq_pending(chip); + + if (!pending) + return IRQ_HANDLED; + + do { + level = __ffs(pending); + handle_nested_irq(level + chip->irq_base); + + pending &= ~(1 << level); + } while (pending); + + return IRQ_HANDLED; +} + +static int max732x_irq_setup(struct max732x_chip *chip, + const struct i2c_device_id *id) +{ + struct i2c_client *client = chip->client; + struct max732x_platform_data *pdata = client->dev.platform_data; + int has_irq = max732x_features[id->driver_data] >> 32; + int ret; + + if (pdata->irq_base && has_irq != INT_NONE) { + int lvl; + + chip->irq_base = pdata->irq_base; + chip->irq_features = has_irq; + mutex_init(&chip->irq_lock); + + for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { + int irq = lvl + chip->irq_base; + + if (!(chip->dir_input & (1 << lvl))) + continue; + + set_irq_chip_data(irq, chip); + set_irq_chip_and_handler(irq, &max732x_irq_chip, + handle_edge_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + ret = request_threaded_irq(client->irq, + NULL, + max732x_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(&client->dev), chip); + if (ret) { + dev_err(&client->dev, "failed to request irq %d\n", + client->irq); + goto out_failed; + } + + chip->gpio_chip.to_irq = max732x_gpio_to_irq; + } + + return 0; + +out_failed: + chip->irq_base = 0; + return ret; +} + +static void max732x_irq_teardown(struct max732x_chip *chip) +{ + if (chip->irq_base) + free_irq(chip->client->irq, chip); +} +#else /* CONFIG_GPIO_MAX732X_IRQ */ +static int max732x_irq_setup(struct max732x_chip *chip, + const struct i2c_device_id *id) +{ + struct i2c_client *client = chip->client; + struct max732x_platform_data *pdata = client->dev.platform_data; + int has_irq = max732x_features[id->driver_data] >> 32; + + if (pdata->irq_base && has_irq != INT_NONE) + dev_warn(&client->dev, "interrupt support not compiled in\n"); + + return 0; +} + +static void max732x_irq_teardown(struct max732x_chip *chip) +{ +} +#endif + static int __devinit max732x_setup_gpio(struct max732x_chip *chip, const struct i2c_device_id *id, unsigned gpio_start) { struct gpio_chip *gc = &chip->gpio_chip; - uint32_t id_data = id->driver_data; + uint32_t id_data = (uint32_t)max732x_features[id->driver_data]; int i, port = 0; for (i = 0; i < 16; i++, id_data >>= 2) { @@ -285,14 +602,14 @@ static int __devinit max732x_probe(struct i2c_client *client, switch (client->addr & 0x70) { case 0x60: chip->client_group_a = client; - if (nr_port > 7) { + if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_b); chip->client_group_b = chip->client_dummy = c; } break; case 0x50: 
chip->client_group_b = client; - if (nr_port > 7) { + if (nr_port > 8) { c = i2c_new_dummy(client->adapter, addr_a); chip->client_group_a = chip->client_dummy = c; } @@ -306,9 +623,13 @@ static int __devinit max732x_probe(struct i2c_client *client, mutex_init(&chip->lock); - max732x_read(chip, is_group_a(chip, 0), &chip->reg_out[0]); - if (nr_port > 7) - max732x_read(chip, is_group_a(chip, 8), &chip->reg_out[1]); + max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]); + if (nr_port > 8) + max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]); + + ret = max732x_irq_setup(chip, id); + if (ret) + goto out_failed; ret = gpiochip_add(&chip->gpio_chip); if (ret) @@ -325,6 +646,7 @@ static int __devinit max732x_probe(struct i2c_client *client, return 0; out_failed: + max732x_irq_teardown(chip); kfree(chip); return ret; } @@ -352,6 +674,8 @@ static int __devexit max732x_remove(struct i2c_client *client) return ret; } + max732x_irq_teardown(chip); + /* unregister any dummy i2c_client */ if (chip->client_dummy) i2c_unregister_device(chip->client_dummy); diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index f156ab3bb6ed..a2b12aa1f2b9 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c @@ -73,7 +73,7 @@ struct pca953x_chip { struct i2c_client *client; struct pca953x_platform_data *dyn_pdata; struct gpio_chip gpio_chip; - char **names; + const char *const *names; }; static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 105701a1f05b..ee568c8fcbd0 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -164,7 +164,7 @@ static int pl061_irq_type(unsigned irq, unsigned trigger) unsigned long flags; u8 gpiois, gpioibe, gpioiev; - if (offset < 0 || offset > PL061_GPIO_NR) + if (offset < 0 || offset >= PL061_GPIO_NR) return -EINVAL; spin_lock_irqsave(&chip->irq_lock, flags); diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c new file mode 100644 index 000000000000..2762698e0204 --- /dev/null +++ b/drivers/gpio/rdc321x-gpio.c @@ -0,0 +1,246 @@ +/* + * RDC321x GPIO driver + * + * Copyright (C) 2008, Volker Weiss + * Copyright (C) 2007-2010 Florian Fainelli + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rdc321x_gpio { + spinlock_t lock; + struct pci_dev *sb_pdev; + u32 data_reg[2]; + int reg1_ctrl_base; + int reg1_data_base; + int reg2_ctrl_base; + int reg2_data_base; + struct gpio_chip chip; +}; + +/* read GPIO pin */ +static int rdc_gpio_get_value(struct gpio_chip *chip, unsigned gpio) +{ + struct rdc321x_gpio *gpch; + u32 value = 0; + int reg; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + reg = gpio < 32 ? 
gpch->reg1_data_base : gpch->reg2_data_base; + + spin_lock(&gpch->lock); + pci_write_config_dword(gpch->sb_pdev, reg, + gpch->data_reg[gpio < 32 ? 0 : 1]); + pci_read_config_dword(gpch->sb_pdev, reg, &value); + spin_unlock(&gpch->lock); + + return (1 << (gpio & 0x1f)) & value ? 1 : 0; +} + +static void rdc_gpio_set_value_impl(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + int reg = (gpio < 32) ? 0 : 1; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + + if (value) + gpch->data_reg[reg] |= 1 << (gpio & 0x1f); + else + gpch->data_reg[reg] &= ~(1 << (gpio & 0x1f)); + + pci_write_config_dword(gpch->sb_pdev, + reg ? gpch->reg2_data_base : gpch->reg1_data_base, + gpch->data_reg[reg]); +} + +/* set GPIO pin to value */ +static void rdc_gpio_set_value(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + spin_lock(&gpch->lock); + rdc_gpio_set_value_impl(chip, gpio, value); + spin_unlock(&gpch->lock); +} + +static int rdc_gpio_config(struct gpio_chip *chip, + unsigned gpio, int value) +{ + struct rdc321x_gpio *gpch; + int err; + u32 reg; + + gpch = container_of(chip, struct rdc321x_gpio, chip); + + spin_lock(&gpch->lock); + err = pci_read_config_dword(gpch->sb_pdev, gpio < 32 ? + gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, ®); + if (err) + goto unlock; + + reg |= 1 << (gpio & 0x1f); + + err = pci_write_config_dword(gpch->sb_pdev, gpio < 32 ? + gpch->reg1_ctrl_base : gpch->reg2_ctrl_base, reg); + if (err) + goto unlock; + + rdc_gpio_set_value_impl(chip, gpio, value); + +unlock: + spin_unlock(&gpch->lock); + + return err; +} + +/* configure GPIO pin as input */ +static int rdc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) +{ + return rdc_gpio_config(chip, gpio, 1); +} + +/* + * Cache the initial value of both GPIO data registers + */ +static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) +{ + int err; + struct resource *r; + struct rdc321x_gpio *rdc321x_gpio_dev; + struct rdc321x_gpio_pdata *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data supplied\n"); + return -ENODEV; + } + + rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL); + if (!rdc321x_gpio_dev) { + dev_err(&pdev->dev, "failed to allocate private data\n"); + return -ENOMEM; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); + if (!r) { + dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); + err = -ENODEV; + goto out_free; + } + + spin_lock_init(&rdc321x_gpio_dev->lock); + rdc321x_gpio_dev->sb_pdev = pdata->sb_pdev; + rdc321x_gpio_dev->reg1_ctrl_base = r->start; + rdc321x_gpio_dev->reg1_data_base = r->start + 0x4; + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); + if (!r) { + dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); + err = -ENODEV; + goto out_free; + } + + rdc321x_gpio_dev->reg2_ctrl_base = r->start; + rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; + + rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; + rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; + rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; + rdc321x_gpio_dev->chip.set = rdc_gpio_set_value; + rdc321x_gpio_dev->chip.base = 0; + rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios; + + platform_set_drvdata(pdev, rdc321x_gpio_dev); + + /* This might not be, what others (BIOS, bootloader, etc.) 
+ wrote to these registers before, but it's a good guess. Still + better than just using 0xffffffff. */ + err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, + rdc321x_gpio_dev->reg1_data_base, + &rdc321x_gpio_dev->data_reg[0]); + if (err) + goto out_drvdata; + + err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, + rdc321x_gpio_dev->reg2_data_base, + &rdc321x_gpio_dev->data_reg[1]); + if (err) + goto out_drvdata; + + dev_info(&pdev->dev, "registering %d GPIOs\n", + rdc321x_gpio_dev->chip.ngpio); + return gpiochip_add(&rdc321x_gpio_dev->chip); + +out_drvdata: + platform_set_drvdata(pdev, NULL); +out_free: + kfree(rdc321x_gpio_dev); + return err; +} + +static int __devexit rdc321x_gpio_remove(struct platform_device *pdev) +{ + int ret; + struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev); + + ret = gpiochip_remove(&rdc321x_gpio_dev->chip); + if (ret) + dev_err(&pdev->dev, "failed to unregister chip\n"); + + kfree(rdc321x_gpio_dev); + platform_set_drvdata(pdev, NULL); + + return ret; +} + +static struct platform_driver rdc321x_gpio_driver = { + .driver.name = "rdc321x-gpio", + .driver.owner = THIS_MODULE, + .probe = rdc321x_gpio_probe, + .remove = __devexit_p(rdc321x_gpio_remove), +}; + +static int __init rdc321x_gpio_init(void) +{ + return platform_driver_register(&rdc321x_gpio_driver); +} + +static void __exit rdc321x_gpio_exit(void) +{ + platform_driver_unregister(&rdc321x_gpio_driver); +} + +module_init(rdc321x_gpio_init); +module_exit(rdc321x_gpio_exit); + +MODULE_AUTHOR("Florian Fainelli "); +MODULE_DESCRIPTION("RDC321x GPIO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rdc321x-gpio"); diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c new file mode 100644 index 000000000000..1be6288780de --- /dev/null +++ b/drivers/gpio/tc35892-gpio.c @@ -0,0 +1,381 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Hanumath Prasad for ST-Ericsson + * Author: Rabin Vincent for ST-Ericsson + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * These registers are modified under the irq bus lock and cached to avoid + * unnecessary writes in bus_sync_unlock. 
+ */ +enum { REG_IBE, REG_IEV, REG_IS, REG_IE }; + +#define CACHE_NR_REGS 4 +#define CACHE_NR_BANKS 3 + +struct tc35892_gpio { + struct gpio_chip chip; + struct tc35892 *tc35892; + struct device *dev; + struct mutex irq_lock; + + int irq_base; + + /* Caches of interrupt control registers for bus_lock */ + u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; + u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; +}; + +static inline struct tc35892_gpio *to_tc35892_gpio(struct gpio_chip *chip) +{ + return container_of(chip, struct tc35892_gpio, chip); +} + +static int tc35892_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2; + u8 mask = 1 << (offset % 8); + int ret; + + ret = tc35892_reg_read(tc35892, reg); + if (ret < 0) + return ret; + + return ret & mask; +} + +static void tc35892_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODATA0 + (offset / 8) * 2; + unsigned pos = offset % 8; + u8 data[] = {!!val << pos, 1 << pos}; + + tc35892_block_write(tc35892, reg, ARRAY_SIZE(data), data); +} + +static int tc35892_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, int val) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODIR0 + offset / 8; + unsigned pos = offset % 8; + + tc35892_gpio_set(chip, offset, val); + + return tc35892_set_bits(tc35892, reg, 1 << pos, 1 << pos); +} + +static int tc35892_gpio_direction_input(struct gpio_chip *chip, + unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 reg = TC35892_GPIODIR0 + offset / 8; + unsigned pos = offset % 8; + + return tc35892_set_bits(tc35892, reg, 1 << pos, 0); +} + +static int tc35892_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + struct tc35892_gpio *tc35892_gpio = to_tc35892_gpio(chip); + + return tc35892_gpio->irq_base + offset; +} + +static struct gpio_chip template_chip = { + .label = "tc35892", + .owner = THIS_MODULE, + .direction_input = tc35892_gpio_direction_input, + .get = tc35892_gpio_get, + .direction_output = tc35892_gpio_direction_output, + .set = tc35892_gpio_set, + .to_irq = tc35892_gpio_to_irq, + .can_sleep = 1, +}; + +static int tc35892_gpio_irq_set_type(unsigned int irq, unsigned int type) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + if (type == IRQ_TYPE_EDGE_BOTH) { + tc35892_gpio->regs[REG_IBE][regoffset] |= mask; + return 0; + } + + tc35892_gpio->regs[REG_IBE][regoffset] &= ~mask; + + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) + tc35892_gpio->regs[REG_IS][regoffset] |= mask; + else + tc35892_gpio->regs[REG_IS][regoffset] &= ~mask; + + if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) + tc35892_gpio->regs[REG_IEV][regoffset] |= mask; + else + tc35892_gpio->regs[REG_IEV][regoffset] &= ~mask; + + return 0; +} + +static void tc35892_gpio_irq_lock(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + + mutex_lock(&tc35892_gpio->irq_lock); +} + +static void tc35892_gpio_irq_sync_unlock(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = 
get_irq_chip_data(irq); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + static const u8 regmap[] = { + [REG_IBE] = TC35892_GPIOIBE0, + [REG_IEV] = TC35892_GPIOIEV0, + [REG_IS] = TC35892_GPIOIS0, + [REG_IE] = TC35892_GPIOIE0, + }; + int i, j; + + for (i = 0; i < CACHE_NR_REGS; i++) { + for (j = 0; j < CACHE_NR_BANKS; j++) { + u8 old = tc35892_gpio->oldregs[i][j]; + u8 new = tc35892_gpio->regs[i][j]; + + if (new == old) + continue; + + tc35892_gpio->oldregs[i][j] = new; + tc35892_reg_write(tc35892, regmap[i] + j * 8, new); + } + } + + mutex_unlock(&tc35892_gpio->irq_lock); +} + +static void tc35892_gpio_irq_mask(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + tc35892_gpio->regs[REG_IE][regoffset] &= ~mask; +} + +static void tc35892_gpio_irq_unmask(unsigned int irq) +{ + struct tc35892_gpio *tc35892_gpio = get_irq_chip_data(irq); + int offset = irq - tc35892_gpio->irq_base; + int regoffset = offset / 8; + int mask = 1 << (offset % 8); + + tc35892_gpio->regs[REG_IE][regoffset] |= mask; +} + +static struct irq_chip tc35892_gpio_irq_chip = { + .name = "tc35892-gpio", + .bus_lock = tc35892_gpio_irq_lock, + .bus_sync_unlock = tc35892_gpio_irq_sync_unlock, + .mask = tc35892_gpio_irq_mask, + .unmask = tc35892_gpio_irq_unmask, + .set_type = tc35892_gpio_irq_set_type, +}; + +static irqreturn_t tc35892_gpio_irq(int irq, void *dev) +{ + struct tc35892_gpio *tc35892_gpio = dev; + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + u8 status[CACHE_NR_BANKS]; + int ret; + int i; + + ret = tc35892_block_read(tc35892, TC35892_GPIOMIS0, + ARRAY_SIZE(status), status); + if (ret < 0) + return IRQ_NONE; + + for (i = 0; i < ARRAY_SIZE(status); i++) { + unsigned int stat = status[i]; + if (!stat) + continue; + + while (stat) { + int bit = __ffs(stat); + int line = i * 8 + bit; + + handle_nested_irq(tc35892_gpio->irq_base + line); + stat &= ~(1 << bit); + } + + tc35892_reg_write(tc35892, TC35892_GPIOIC0 + i, status[i]); + } + + return IRQ_HANDLED; +} + +static int tc35892_gpio_irq_init(struct tc35892_gpio *tc35892_gpio) +{ + int base = tc35892_gpio->irq_base; + int irq; + + for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) { + set_irq_chip_data(irq, tc35892_gpio); + set_irq_chip_and_handler(irq, &tc35892_gpio_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void tc35892_gpio_irq_remove(struct tc35892_gpio *tc35892_gpio) +{ + int base = tc35892_gpio->irq_base; + int irq; + + for (irq = base; irq < base + tc35892_gpio->chip.ngpio; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static int __devinit tc35892_gpio_probe(struct platform_device *pdev) +{ + struct tc35892 *tc35892 = dev_get_drvdata(pdev->dev.parent); + struct tc35892_gpio_platform_data *pdata; + struct tc35892_gpio *tc35892_gpio; + int ret; + int irq; + + pdata = tc35892->pdata->gpio; + if (!pdata) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + tc35892_gpio = kzalloc(sizeof(struct tc35892_gpio), GFP_KERNEL); + if (!tc35892_gpio) + return -ENOMEM; + + mutex_init(&tc35892_gpio->irq_lock); + + tc35892_gpio->dev = &pdev->dev; + tc35892_gpio->tc35892 = tc35892; + + tc35892_gpio->chip = template_chip; + tc35892_gpio->chip.ngpio 
= tc35892->num_gpio; + tc35892_gpio->chip.dev = &pdev->dev; + tc35892_gpio->chip.base = pdata->gpio_base; + + tc35892_gpio->irq_base = tc35892->irq_base + TC35892_INT_GPIO(0); + + /* Bring the GPIO module out of reset */ + ret = tc35892_set_bits(tc35892, TC35892_RSTCTRL, + TC35892_RSTCTRL_GPIRST, 0); + if (ret < 0) + goto out_free; + + ret = tc35892_gpio_irq_init(tc35892_gpio); + if (ret) + goto out_free; + + ret = request_threaded_irq(irq, NULL, tc35892_gpio_irq, IRQF_ONESHOT, + "tc35892-gpio", tc35892_gpio); + if (ret) { + dev_err(&pdev->dev, "unable to get irq: %d\n", ret); + goto out_removeirq; + } + + ret = gpiochip_add(&tc35892_gpio->chip); + if (ret) { + dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret); + goto out_freeirq; + } + + platform_set_drvdata(pdev, tc35892_gpio); + + return 0; + +out_freeirq: + free_irq(irq, tc35892_gpio); +out_removeirq: + tc35892_gpio_irq_remove(tc35892_gpio); +out_free: + kfree(tc35892_gpio); + return ret; +} + +static int __devexit tc35892_gpio_remove(struct platform_device *pdev) +{ + struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev); + int irq = platform_get_irq(pdev, 0); + int ret; + + ret = gpiochip_remove(&tc35892_gpio->chip); + if (ret < 0) { + dev_err(tc35892_gpio->dev, + "unable to remove gpiochip: %d\n", ret); + return ret; + } + + free_irq(irq, tc35892_gpio); + tc35892_gpio_irq_remove(tc35892_gpio); + + platform_set_drvdata(pdev, NULL); + kfree(tc35892_gpio); + + return 0; +} + +static struct platform_driver tc35892_gpio_driver = { + .driver.name = "tc35892-gpio", + .driver.owner = THIS_MODULE, + .probe = tc35892_gpio_probe, + .remove = __devexit_p(tc35892_gpio_remove), +}; + +static int __init tc35892_gpio_init(void) +{ + return platform_driver_register(&tc35892_gpio_driver); +} +subsys_initcall(tc35892_gpio_init); + +static void __exit tc35892_gpio_exit(void) +{ + platform_driver_unregister(&tc35892_gpio_driver); +} +module_exit(tc35892_gpio_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TC35892 GPIO driver"); +MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent"); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index f569ae88ab38..c1981861bbbd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -147,7 +147,10 @@ drm_edid_block_valid(u8 *raw_edid) csum += raw_edid[i]; if (csum) { DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); - goto bad; + + /* allow CEA to slide through, switches mangle this */ + if (raw_edid[0] != 0x02) + goto bad; } /* per-block-type checks */ diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7e663a79829f..266b0ff441af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -241,7 +241,8 @@ nouveau_connector_detect(struct drm_connector *connector) if (nv_encoder && nv_connector->native_mode) { unsigned status = connector_status_connected; -#ifdef CONFIG_ACPI +#if defined(CONFIG_ACPI_BUTTON) || \ + (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) if (!nouveau_ignorelid && !acpi_lid_open()) status = connector_status_unknown; #endif diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 0616c96e4b67..704a25d04ac9 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -253,7 +253,11 @@ nv40_graph_init(struct drm_device *dev) if (!dev_priv->engine.graph.ctxprog) { struct nouveau_grctx ctx = {}; - uint32_t cp[256]; + uint32_t *cp; + + cp = 
kmalloc(sizeof(*cp) * 256, GFP_KERNEL); + if (!cp) + return -ENOMEM; ctx.dev = dev; ctx.mode = NOUVEAU_GRCTX_PROG; @@ -265,6 +269,8 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); for (i = 0; i < ctx.ctxprog_len; i++) nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + + kfree(cp); } /* No context present currently */ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 03dd6c41dc19..f3f2827017ef 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -707,6 +707,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; case ATOM_DCPLL: case ATOM_PPLL_INVALID: + default: pll = &rdev->clock.dcpll; break; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 66a37fb75839..669feb689bfc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -576,6 +576,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, */ int radeon_agp_init(struct radeon_device *rdev); void radeon_agp_resume(struct radeon_device *rdev); +void radeon_agp_suspend(struct radeon_device *rdev); void radeon_agp_fini(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 28e473f1f56f..f40dfb77f9b1 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) } #endif } + +void radeon_agp_suspend(struct radeon_device *rdev) +{ + radeon_agp_fini(rdev); +} diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6e733fdc3349..24ea683f7cf5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -680,10 +680,18 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct uint8_t dac; union atom_supported_devices *supported_devices; int i, j, max_device; - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; + struct bios_connector *bios_connectors; + size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; - if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) + bios_connectors = kzalloc(bc_size, GFP_KERNEL); + if (!bios_connectors) + return false; + + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset)) { + kfree(bios_connectors); return false; + } supported_devices = (union atom_supported_devices *)(ctx->bios + data_offset); @@ -851,6 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct radeon_link_encoder_connector(dev); + kfree(bios_connectors); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a20b612ffe75..fdc3fdf78acb 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -754,6 +754,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) /* evict remaining vram memory */ radeon_bo_evict_vram(rdev); + radeon_agp_suspend(rdev); + pci_save_state(dev->pdev); if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index cc5316dcf580..b3ba44c0a818 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -900,9 +900,10 @@ static void 
radeon_cp_dispatch_clear(struct drm_device * dev, flags |= RADEON_FRONT; } if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { - if (!dev_priv->have_z_offset) + if (!dev_priv->have_z_offset) { printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); - flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + } } if (flags & (RADEON_FRONT | RADEON_BACK)) { diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 56f314fbd4f9..c94026768570 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -811,7 +811,7 @@ static const char *relatives[REL_MAX + 1] = { [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", }; -static const char *absolutes[ABS_MAX + 1] = { +static const char *absolutes[ABS_CNT] = { [ABS_X] = "X", [ABS_Y] = "Y", [ABS_Z] = "Z", [ABS_RX] = "Rx", [ABS_RY] = "Ry", [ABS_RZ] = "Rz", diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6a9ac754ca5d..e19cf8eb6ccf 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -447,13 +447,14 @@ config SENSORS_IT87 will be called it87. config SENSORS_LM63 - tristate "National Semiconductor LM63" + tristate "National Semiconductor LM63 and LM64" depends on I2C help - If you say yes here you get support for the National Semiconductor - LM63 remote diode digital temperature sensor with integrated fan - control. Such chips are found on the Tyan S4882 (Thunder K8QS Pro) - motherboard, among others. + If you say yes here you get support for the National + Semiconductor LM63 and LM64 remote diode digital temperature + sensors with integrated fan control. Such chips are found + on the Tyan S4882 (Thunder K8QS Pro) motherboard, among + others. This driver can also be built as a module. If so, the module will be called lm63. @@ -492,7 +493,8 @@ config SENSORS_LM75 - NXP's LM75A - ST Microelectronics STDS75 - TelCom (now Microchip) TCN75 - - Texas Instruments TMP100, TMP101, TMP75, TMP175, TMP275 + - Texas Instruments TMP100, TMP101, TMP105, TMP75, TMP175, + TMP275 This driver supports driver model based binding through board specific I2C device tables. @@ -749,6 +751,16 @@ config SENSORS_DME1737 This driver can also be built as a module. If so, the module will be called dme1737. +config SENSORS_EMC1403 + tristate "SMSC EMC1403 thermal sensor" + depends on I2C + help + If you say yes here you get support for the SMSC EMC1403 + temperature monitoring chip. + + Threshold values can be configured using sysfs. + Data from the different diodes are accessible via sysfs. + config SENSORS_SMSC47M1 tristate "SMSC LPC47M10x and compatibles" help @@ -831,6 +843,16 @@ config SENSORS_THMC50 This driver can also be built as a module. If so, the module will be called thmc50. +config SENSORS_TMP102 + tristate "Texas Instruments TMP102" + depends on I2C && EXPERIMENTAL + help + If you say yes here you get support for Texas Instruments TMP102 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called tmp102. 
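Like the other hwmon drivers in this series, the emc1403 and tmp102 drivers added above report their measurements through the standard hwmon sysfs interface, where temperature attributes are expressed in millidegrees Celsius. A small userspace sketch (the hwmon0 index and the temp1_input attribute are examples; the actual path depends on which devices are present and on probe order):

	#include <stdio.h>

	int main(void)
	{
		/* hwmon convention: tempN_input is in millidegrees Celsius */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &mdeg) == 1)
			printf("temp1_input = %ld millidegrees C\n", mdeg);
		fclose(f);
		return 0;
	}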
+ config SENSORS_TMP401 tristate "Texas Instruments TMP401 and compatibles" depends on I2C && EXPERIMENTAL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 86920fb34118..2138ceb1a713 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o +obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o obj-$(CONFIG_SENSORS_F71805F) += f71805f.o obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o obj-$(CONFIG_SENSORS_F75375S) += f75375s.o @@ -90,6 +91,7 @@ obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o obj-$(CONFIG_SENSORS_THMC50) += thmc50.o +obj-$(CONFIG_SENSORS_TMP102) += tmp102.o obj-$(CONFIG_SENSORS_TMP401) += tmp401.o obj-$(CONFIG_SENSORS_TMP421) += tmp421.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c index 1644b92e7cc4..15c1a9616af3 100644 --- a/drivers/hwmon/adm1031.c +++ b/drivers/hwmon/adm1031.c @@ -36,6 +36,7 @@ #define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr)) #define ADM1031_REG_PWM (0x22) #define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr)) +#define ADM1031_REG_FAN_FILTER (0x23) #define ADM1031_REG_TEMP_OFFSET(nr) (0x0d + (nr)) #define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4 * (nr)) @@ -61,6 +62,9 @@ #define ADM1031_CONF2_TACH2_ENABLE 0x08 #define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan)) +#define ADM1031_UPDATE_RATE_MASK 0x1c +#define ADM1031_UPDATE_RATE_SHIFT 2 + /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; @@ -75,6 +79,7 @@ struct adm1031_data { int chip_type; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ + unsigned int update_rate; /* In milliseconds */ /* The chan_select_table contains the possible configurations for * auto fan control. 
*/ @@ -738,6 +743,57 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); +/* Update Rate */ +static const unsigned int update_rates[] = { + 16000, 8000, 4000, 2000, 1000, 500, 250, 125, +}; + +static ssize_t show_update_rate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + + return sprintf(buf, "%u\n", data->update_rate); +} + +static ssize_t set_update_rate(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + unsigned long val; + int i, err; + u8 reg; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; + + /* find the nearest update rate from the table */ + for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { + if (val >= update_rates[i]) + break; + } + /* if not found, we point to the last entry (lowest update rate) */ + + /* set the new update rate while preserving other settings */ + reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); + reg &= ~ADM1031_UPDATE_RATE_MASK; + reg |= i << ADM1031_UPDATE_RATE_SHIFT; + adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); + + mutex_lock(&data->update_lock); + data->update_rate = update_rates[i]; + mutex_unlock(&data->update_lock); + + return count; +} + +static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, + set_update_rate); + static struct attribute *adm1031_attributes[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_div.dev_attr.attr, @@ -774,6 +830,7 @@ static struct attribute *adm1031_attributes[] = { &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, + &dev_attr_update_rate.attr, &dev_attr_alarms.attr, NULL @@ -900,6 +957,7 @@ static void adm1031_init_client(struct i2c_client *client) { unsigned int read_val; unsigned int mask; + int i; struct adm1031_data *data = i2c_get_clientdata(client); mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE); @@ -919,18 +977,24 @@ static void adm1031_init_client(struct i2c_client *client) ADM1031_CONF1_MONITOR_ENABLE); } + /* Read the chip's update rate */ + mask = ADM1031_UPDATE_RATE_MASK; + read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); + i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; + data->update_rate = update_rates[i]; } static struct adm1031_data *adm1031_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct adm1031_data *data = i2c_get_clientdata(client); + unsigned long next_update; int chan; mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + HZ + HZ / 2) - || !data->valid) { + next_update = data->last_updated + msecs_to_jiffies(data->update_rate); + if (time_after(jiffies, next_update) || !data->valid) { dev_dbg(&client->dev, "Starting adm1031 update\n"); for (chan = 0; diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index f085c18d2905..b6598aa557a0 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -148,6 +148,20 @@ static const char *temperature_sensors_sets[][41] = { /* Set 18: MacBook Pro 2,2 */ { "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "TM0P", "TTF0", "Th0H", "Th1H", "Tm0P", "Ts0P", NULL }, +/* Set 19: Macbook Pro 5,3 */ + { "TB0T", "TB1T", 
"TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TG0D", + "TG0F", "TG0H", "TG0P", "TG0T", "TN0D", "TN0P", "TTF0", "Th2H", + "Tm0P", "Ts0P", "Ts0S", NULL }, +/* Set 20: MacBook Pro 5,4 */ + { "TB0T", "TB1T", "TB2T", "TB3T", "TC0D", "TC0F", "TC0P", "TN0D", + "TN0P", "TTF0", "Th2H", "Ts0P", "Ts0S", NULL }, +/* Set 21: MacBook Pro 6,2 */ + { "TB0T", "TB1T", "TB2T", "TC0C", "TC0D", "TC0P", "TC1C", "TG0D", + "TG0P", "TG0T", "TMCD", "TP0P", "TPCD", "Th1H", "Th2H", "Tm0P", + "Ts0P", "Ts0S", NULL }, +/* Set 22: MacBook Pro 7,1 */ + { "TB0T", "TB1T", "TB2T", "TC0D", "TC0P", "TN0D", "TN0P", "TN0S", + "TN1D", "TN1F", "TN1G", "TN1S", "Th1H", "Ts0P", "Ts0S", NULL }, }; /* List of keys used to read/write fan speeds */ @@ -646,6 +660,17 @@ out: return snprintf(sysfsbuf, PAGE_SIZE, "(%d,%d)\n", left, right); } +/* Displays sensor key as label */ +static ssize_t applesmc_show_sensor_label(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + const char *key = + temperature_sensors_sets[applesmc_temperature_set][attr->index]; + + return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key); +} + /* Displays degree Celsius * 1000 */ static ssize_t applesmc_show_temperature(struct device *dev, struct device_attribute *devattr, char *sysfsbuf) @@ -1113,6 +1138,86 @@ static const struct attribute_group fan_attribute_groups[] = { /* * Temperature sensors sysfs entries. */ +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp5_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 5); +static SENSOR_DEVICE_ATTR(temp7_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 6); +static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 7); +static SENSOR_DEVICE_ATTR(temp9_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 8); +static SENSOR_DEVICE_ATTR(temp10_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 9); +static SENSOR_DEVICE_ATTR(temp11_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 10); +static SENSOR_DEVICE_ATTR(temp12_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 11); +static SENSOR_DEVICE_ATTR(temp13_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 12); +static SENSOR_DEVICE_ATTR(temp14_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 13); +static SENSOR_DEVICE_ATTR(temp15_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 14); +static SENSOR_DEVICE_ATTR(temp16_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 15); +static SENSOR_DEVICE_ATTR(temp17_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 16); +static SENSOR_DEVICE_ATTR(temp18_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 17); +static SENSOR_DEVICE_ATTR(temp19_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 18); +static SENSOR_DEVICE_ATTR(temp20_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 19); +static SENSOR_DEVICE_ATTR(temp21_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 20); +static SENSOR_DEVICE_ATTR(temp22_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 21); +static SENSOR_DEVICE_ATTR(temp23_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 22); +static 
SENSOR_DEVICE_ATTR(temp24_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 23); +static SENSOR_DEVICE_ATTR(temp25_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 24); +static SENSOR_DEVICE_ATTR(temp26_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 25); +static SENSOR_DEVICE_ATTR(temp27_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 26); +static SENSOR_DEVICE_ATTR(temp28_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 27); +static SENSOR_DEVICE_ATTR(temp29_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 28); +static SENSOR_DEVICE_ATTR(temp30_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 29); +static SENSOR_DEVICE_ATTR(temp31_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 30); +static SENSOR_DEVICE_ATTR(temp32_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 31); +static SENSOR_DEVICE_ATTR(temp33_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 32); +static SENSOR_DEVICE_ATTR(temp34_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 33); +static SENSOR_DEVICE_ATTR(temp35_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 34); +static SENSOR_DEVICE_ATTR(temp36_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 35); +static SENSOR_DEVICE_ATTR(temp37_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 36); +static SENSOR_DEVICE_ATTR(temp38_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 37); +static SENSOR_DEVICE_ATTR(temp39_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 38); +static SENSOR_DEVICE_ATTR(temp40_label, S_IRUGO, + applesmc_show_sensor_label, NULL, 39); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, applesmc_show_temperature, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, @@ -1194,6 +1299,50 @@ static SENSOR_DEVICE_ATTR(temp39_input, S_IRUGO, static SENSOR_DEVICE_ATTR(temp40_input, S_IRUGO, applesmc_show_temperature, NULL, 39); +static struct attribute *label_attributes[] = { + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + &sensor_dev_attr_temp5_label.dev_attr.attr, + &sensor_dev_attr_temp6_label.dev_attr.attr, + &sensor_dev_attr_temp7_label.dev_attr.attr, + &sensor_dev_attr_temp8_label.dev_attr.attr, + &sensor_dev_attr_temp9_label.dev_attr.attr, + &sensor_dev_attr_temp10_label.dev_attr.attr, + &sensor_dev_attr_temp11_label.dev_attr.attr, + &sensor_dev_attr_temp12_label.dev_attr.attr, + &sensor_dev_attr_temp13_label.dev_attr.attr, + &sensor_dev_attr_temp14_label.dev_attr.attr, + &sensor_dev_attr_temp15_label.dev_attr.attr, + &sensor_dev_attr_temp16_label.dev_attr.attr, + &sensor_dev_attr_temp17_label.dev_attr.attr, + &sensor_dev_attr_temp18_label.dev_attr.attr, + &sensor_dev_attr_temp19_label.dev_attr.attr, + &sensor_dev_attr_temp20_label.dev_attr.attr, + &sensor_dev_attr_temp21_label.dev_attr.attr, + &sensor_dev_attr_temp22_label.dev_attr.attr, + &sensor_dev_attr_temp23_label.dev_attr.attr, + &sensor_dev_attr_temp24_label.dev_attr.attr, + &sensor_dev_attr_temp25_label.dev_attr.attr, + &sensor_dev_attr_temp26_label.dev_attr.attr, + &sensor_dev_attr_temp27_label.dev_attr.attr, + &sensor_dev_attr_temp28_label.dev_attr.attr, + &sensor_dev_attr_temp29_label.dev_attr.attr, + &sensor_dev_attr_temp30_label.dev_attr.attr, + &sensor_dev_attr_temp31_label.dev_attr.attr, + &sensor_dev_attr_temp32_label.dev_attr.attr, + &sensor_dev_attr_temp33_label.dev_attr.attr, + &sensor_dev_attr_temp34_label.dev_attr.attr, + &sensor_dev_attr_temp35_label.dev_attr.attr, + 
&sensor_dev_attr_temp36_label.dev_attr.attr, + &sensor_dev_attr_temp37_label.dev_attr.attr, + &sensor_dev_attr_temp38_label.dev_attr.attr, + &sensor_dev_attr_temp39_label.dev_attr.attr, + &sensor_dev_attr_temp40_label.dev_attr.attr, + NULL +}; + static struct attribute *temperature_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, @@ -1241,6 +1390,10 @@ static struct attribute *temperature_attributes[] = { static const struct attribute_group temperature_attributes_group = { .attrs = temperature_attributes }; +static const struct attribute_group label_attributes_group = { + .attrs = label_attributes +}; + /* Module stuff */ /* @@ -1363,6 +1516,14 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = { { .accelerometer = 0, .light = 0, .temperature_set = 17 }, /* MacBook Pro 2,2: accelerometer, backlight and temperature set 18 */ { .accelerometer = 1, .light = 1, .temperature_set = 18 }, +/* MacBook Pro 5,3: accelerometer, backlight and temperature set 19 */ + { .accelerometer = 1, .light = 1, .temperature_set = 19 }, +/* MacBook Pro 5,4: accelerometer, backlight and temperature set 20 */ + { .accelerometer = 1, .light = 1, .temperature_set = 20 }, +/* MacBook Pro 6,2: accelerometer, backlight and temperature set 21 */ + { .accelerometer = 1, .light = 1, .temperature_set = 21 }, +/* MacBook Pro 7,1: accelerometer, backlight and temperature set 22 */ + { .accelerometer = 1, .light = 1, .temperature_set = 22 }, }; /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". @@ -1376,6 +1537,22 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir") }, &applesmc_dmi_data[7]}, + { applesmc_dmi_match, "Apple MacBook Pro 7", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro7") }, + &applesmc_dmi_data[22]}, + { applesmc_dmi_match, "Apple MacBook Pro 5,4", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,4") }, + &applesmc_dmi_data[20]}, + { applesmc_dmi_match, "Apple MacBook Pro 5,3", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,3") }, + &applesmc_dmi_data[19]}, + { applesmc_dmi_match, "Apple MacBook Pro 6", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6") }, + &applesmc_dmi_data[21]}, { applesmc_dmi_match, "Apple MacBook Pro 5", { DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5") }, @@ -1518,7 +1695,8 @@ static int __init applesmc_init(void) for (i = 0; temperature_sensors_sets[applesmc_temperature_set][i] != NULL; i++) { - if (temperature_attributes[i] == NULL) { + if (temperature_attributes[i] == NULL || + label_attributes[i] == NULL) { printk(KERN_ERR "applesmc: More temperature sensors " "in temperature_sensors_sets (at least %i)" "than available sysfs files in " @@ -1530,6 +1708,10 @@ static int __init applesmc_init(void) temperature_attributes[i]); if (ret) goto out_temperature; + ret = sysfs_create_file(&pdev->dev.kobj, + label_attributes[i]); + if (ret) + goto out_temperature; } if (applesmc_accelerometer) { @@ -1580,6 +1762,7 @@ out_accelerometer: if (applesmc_accelerometer) applesmc_release_accelerometer(); out_temperature: + sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group); sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); out_fans: while (fans_handled) @@ -1609,6 +1792,7 @@ static void __exit applesmc_exit(void) } if 
(applesmc_accelerometer) applesmc_release_accelerometer(); + sysfs_remove_group(&pdev->dev.kobj, &label_attributes_group); sysfs_remove_group(&pdev->dev.kobj, &temperature_attributes_group); while (fans_handled) sysfs_remove_group(&pdev->dev.kobj, diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 16c420240724..653db1bda934 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -1411,6 +1411,13 @@ static int __init atk0110_init(void) { int ret; + /* Make sure it's safe to access the device through ACPI */ + if (!acpi_resources_are_enforced()) { + pr_err("atk: Resources not safely usable due to " + "acpi_enforce_resources kernel parameter\n"); + return -EBUSY; + } + ret = acpi_bus_register_driver(&atk_driver); if (ret) pr_info("atk: acpi_bus_register_driver failed: %d\n", ret); diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 823dd28a902c..980c17d5eeae 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -1,12 +1,14 @@ /* - * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x and - * SCH5027 Super-I/O chips integrated hardware monitoring features. - * Copyright (c) 2007, 2008 Juerg Haefliger + * dme1737.c - Driver for the SMSC DME1737, Asus A8000, SMSC SCH311x, SCH5027, + * and SCH5127 Super-I/O chips integrated hardware monitoring + * features. + * Copyright (c) 2007, 2008, 2009, 2010 Juerg Haefliger * * This driver is an I2C/ISA hybrid, meaning that it uses the I2C bus to access * the chip registers if a DME1737, A8000, or SCH5027 is found and the ISA bus - * if a SCH311x chip is found. Both types of chips have very similar hardware - * monitoring capabilities but differ in the way they can be accessed. + * if a SCH311x or SCH5127 chip is found. Both types of chips have very + * similar hardware monitoring capabilities but differ in the way they can be + * accessed. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -57,7 +59,7 @@ MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC " /* Addresses to scan */ static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; -enum chips { dme1737, sch5027, sch311x }; +enum chips { dme1737, sch5027, sch311x, sch5127 }; /* --------------------------------------------------------------------- * Registers @@ -164,10 +166,29 @@ static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23}; #define DME1737_VERSTEP_MASK 0xf8 #define SCH311X_DEVICE 0x8c #define SCH5027_VERSTEP 0x69 +#define SCH5127_DEVICE 0x8e + +/* Device ID values (global configuration register index 0x20) */ +#define DME1737_ID_1 0x77 +#define DME1737_ID_2 0x78 +#define SCH3112_ID 0x7c +#define SCH3114_ID 0x7d +#define SCH3116_ID 0x7f +#define SCH5027_ID 0x89 +#define SCH5127_ID 0x86 /* Length of ISA address segment */ #define DME1737_EXTENT 2 +/* chip-dependent features */ +#define HAS_TEMP_OFFSET (1 << 0) /* bit 0 */ +#define HAS_VID (1 << 1) /* bit 1 */ +#define HAS_ZONE3 (1 << 2) /* bit 2 */ +#define HAS_ZONE_HYST (1 << 3) /* bit 3 */ +#define HAS_PWM_MIN (1 << 4) /* bit 4 */ +#define HAS_FAN(ix) (1 << ((ix) + 5)) /* bits 5-10 */ +#define HAS_PWM(ix) (1 << ((ix) + 11)) /* bits 11-16 */ + /* --------------------------------------------------------------------- * Data structures and manipulation thereof * --------------------------------------------------------------------- */ @@ -187,8 +208,7 @@ struct dme1737_data { u8 vid; u8 pwm_rr_en; - u8 has_pwm; - u8 has_fan; + u32 has_features; /* Register values */ u16 in[7]; @@ -224,8 +244,11 @@ static const int IN_NOMINAL_SCH311x[] = {2500, 1500, 3300, 5000, 12000, 3300, 3300}; static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300, 3300}; +static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300, + 3300}; #define IN_NOMINAL(type) ((type) == sch311x ? IN_NOMINAL_SCH311x : \ (type) == sch5027 ? IN_NOMINAL_SCH5027 : \ + (type) == sch5127 ? 
IN_NOMINAL_SCH5127 : \ IN_NOMINAL_DME1737) /* Voltage input @@ -568,7 +591,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Sample register contents every 1 sec */ if (time_after(jiffies, data->last_update + HZ) || !data->valid) { - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { data->vid = dme1737_read(data, DME1737_REG_VID) & 0x3f; } @@ -599,7 +622,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) DME1737_REG_TEMP_MIN(ix)); data->temp_max[ix] = dme1737_read(data, DME1737_REG_TEMP_MAX(ix)); - if (data->type != sch5027) { + if (data->has_features & HAS_TEMP_OFFSET) { data->temp_offset[ix] = dme1737_read(data, DME1737_REG_TEMP_OFFSET(ix)); } @@ -626,7 +649,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) for (ix = 0; ix < ARRAY_SIZE(data->fan); ix++) { /* Skip reading registers if optional fans are not * present */ - if (!(data->has_fan & (1 << ix))) { + if (!(data->has_features & HAS_FAN(ix))) { continue; } data->fan[ix] = dme1737_read(data, @@ -650,7 +673,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) for (ix = 0; ix < ARRAY_SIZE(data->pwm); ix++) { /* Skip reading registers if optional PWMs are not * present */ - if (!(data->has_pwm & (1 << ix))) { + if (!(data->has_features & HAS_PWM(ix))) { continue; } data->pwm[ix] = dme1737_read(data, @@ -672,12 +695,24 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Thermal zone registers */ for (ix = 0; ix < ARRAY_SIZE(data->zone_low); ix++) { - data->zone_low[ix] = dme1737_read(data, - DME1737_REG_ZONE_LOW(ix)); - data->zone_abs[ix] = dme1737_read(data, - DME1737_REG_ZONE_ABS(ix)); + /* Skip reading registers if zone3 is not present */ + if ((ix == 2) && !(data->has_features & HAS_ZONE3)) { + continue; + } + /* sch5127 zone2 registers are special */ + if ((ix == 1) && (data->type == sch5127)) { + data->zone_low[1] = dme1737_read(data, + DME1737_REG_ZONE_LOW(2)); + data->zone_abs[1] = dme1737_read(data, + DME1737_REG_ZONE_ABS(2)); + } else { + data->zone_low[ix] = dme1737_read(data, + DME1737_REG_ZONE_LOW(ix)); + data->zone_abs[ix] = dme1737_read(data, + DME1737_REG_ZONE_ABS(ix)); + } } - if (data->type != sch5027) { + if (data->has_features & HAS_ZONE_HYST) { for (ix = 0; ix < ARRAY_SIZE(data->zone_hyst); ix++) { data->zone_hyst[ix] = dme1737_read(data, DME1737_REG_ZONE_HYST(ix)); @@ -1594,10 +1629,6 @@ static struct attribute *dme1737_attr[] ={ &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_channels_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, - &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr, NULL }; @@ -1605,27 +1636,23 @@ static const struct attribute_group dme1737_group = { .attrs = dme1737_attr, }; -/* The following struct holds misc attributes, which are not available in all - * chips. Their creation depends on the chip type which is determined during - * module load. */ -static struct attribute *dme1737_misc_attr[] = { - /* Temperatures */ +/* The following struct holds temp offset attributes, which are not available + * in all chips. 
The following chips support them: + * DME1737, SCH311x */ +static struct attribute *dme1737_temp_offset_attr[] = { &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, - /* Zones */ - &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, - &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, - &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, NULL }; -static const struct attribute_group dme1737_misc_group = { - .attrs = dme1737_misc_attr, +static const struct attribute_group dme1737_temp_offset_group = { + .attrs = dme1737_temp_offset_attr, }; -/* The following struct holds VID-related attributes. Their creation - depends on the chip type which is determined during module load. */ +/* The following struct holds VID related attributes, which are not available + * in all chips. The following chips support them: + * DME1737 */ static struct attribute *dme1737_vid_attr[] = { &dev_attr_vrm.attr, &dev_attr_cpu0_vid.attr, @@ -1636,6 +1663,36 @@ static const struct attribute_group dme1737_vid_group = { .attrs = dme1737_vid_attr, }; +/* The following struct holds temp zone 3 related attributes, which are not + * available in all chips. The following chips support them: + * DME1737, SCH311x, SCH5027 */ +static struct attribute *dme1737_zone3_attr[] = { + &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, + &sensor_dev_attr_zone3_auto_channels_temp.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone3_group = { + .attrs = dme1737_zone3_attr, +}; + + +/* The following struct holds temp zone hysteresis related attributes, which + * are not available in all chips. The following chips support them: + * DME1737, SCH311x */ +static struct attribute *dme1737_zone_hyst_attr[] = { + &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, + &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone_hyst_group = { + .attrs = dme1737_zone_hyst_attr, +}; + /* The following structs hold the PWM attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. */ @@ -1691,10 +1748,10 @@ static const struct attribute_group dme1737_pwm_group[] = { { .attrs = dme1737_pwm6_attr }, }; -/* The following struct holds misc PWM attributes, which are not available in - * all chips. Their creation depends on the chip type which is determined +/* The following struct holds auto PWM min attributes, which are not available + * in all chips. Their creation depends on the chip type which is determined * during module load. 
*/ -static struct attribute *dme1737_pwm_misc_attr[] = { +static struct attribute *dme1737_auto_pwm_min_attr[] = { &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr, @@ -1764,14 +1821,25 @@ static struct attribute *dme1737_zone_chmod_attr[] = { &sensor_dev_attr_zone2_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone2_auto_point3_temp.dev_attr.attr, + NULL +}; + +static const struct attribute_group dme1737_zone_chmod_group = { + .attrs = dme1737_zone_chmod_attr, +}; + + +/* The permissions of the following zone 3 attributes are changed to read- + * writeable if the chip is *not* locked. Otherwise they stay read-only. */ +static struct attribute *dme1737_zone3_chmod_attr[] = { &sensor_dev_attr_zone3_auto_point1_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point2_temp.dev_attr.attr, &sensor_dev_attr_zone3_auto_point3_temp.dev_attr.attr, NULL }; -static const struct attribute_group dme1737_zone_chmod_group = { - .attrs = dme1737_zone_chmod_attr, +static const struct attribute_group dme1737_zone3_chmod_group = { + .attrs = dme1737_zone3_chmod_attr, }; /* The permissions of the following PWM attributes are changed to read- @@ -1887,30 +1955,35 @@ static void dme1737_remove_files(struct device *dev) int ix; for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { - if (data->has_fan & (1 << ix)) { + if (data->has_features & HAS_FAN(ix)) { sysfs_remove_group(&dev->kobj, &dme1737_fan_group[ix]); } } for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { sysfs_remove_group(&dev->kobj, &dme1737_pwm_group[ix]); - if (data->type != sch5027 && ix < 3) { + if ((data->has_features & HAS_PWM_MIN) && ix < 3) { sysfs_remove_file(&dev->kobj, - dme1737_pwm_misc_attr[ix]); + dme1737_auto_pwm_min_attr[ix]); } } } - if (data->type != sch5027) { - sysfs_remove_group(&dev->kobj, &dme1737_misc_group); + if (data->has_features & HAS_TEMP_OFFSET) { + sysfs_remove_group(&dev->kobj, &dme1737_temp_offset_group); } - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { sysfs_remove_group(&dev->kobj, &dme1737_vid_group); } - + if (data->has_features & HAS_ZONE3) { + sysfs_remove_group(&dev->kobj, &dme1737_zone3_group); + } + if (data->has_features & HAS_ZONE_HYST) { + sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group); + } sysfs_remove_group(&dev->kobj, &dme1737_group); if (!data->client) { @@ -1934,23 +2007,31 @@ static int dme1737_create_files(struct device *dev) goto exit_remove; } - /* Create misc sysfs attributes */ - if ((data->type != sch5027) && + /* Create chip-dependent sysfs attributes */ + if ((data->has_features & HAS_TEMP_OFFSET) && (err = sysfs_create_group(&dev->kobj, - &dme1737_misc_group))) { + &dme1737_temp_offset_group))) { goto exit_remove; } - - /* Create VID-related sysfs attributes */ - if ((data->type == dme1737) && + if ((data->has_features & HAS_VID) && (err = sysfs_create_group(&dev->kobj, &dme1737_vid_group))) { goto exit_remove; } + if ((data->has_features & HAS_ZONE3) && + (err = sysfs_create_group(&dev->kobj, + &dme1737_zone3_group))) { + goto exit_remove; + } + if ((data->has_features & HAS_ZONE_HYST) && + (err = sysfs_create_group(&dev->kobj, + &dme1737_zone_hyst_group))) { + goto exit_remove; + } /* Create fan sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { - if (data->has_fan & (1 << ix)) { + if 
(data->has_features & HAS_FAN(ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_fan_group[ix]))) { goto exit_remove; @@ -1960,14 +2041,14 @@ static int dme1737_create_files(struct device *dev) /* Create PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { if ((err = sysfs_create_group(&dev->kobj, &dme1737_pwm_group[ix]))) { goto exit_remove; } - if (data->type != sch5027 && ix < 3 && + if ((data->has_features & HAS_PWM_MIN) && ix < 3 && (err = sysfs_create_file(&dev->kobj, - dme1737_pwm_misc_attr[ix]))) { + dme1737_auto_pwm_min_attr[ix]))) { goto exit_remove; } } @@ -1983,21 +2064,30 @@ static int dme1737_create_files(struct device *dev) dme1737_chmod_group(dev, &dme1737_zone_chmod_group, S_IRUGO | S_IWUSR); - /* Change permissions of misc sysfs attributes */ - if (data->type != sch5027) { - dme1737_chmod_group(dev, &dme1737_misc_group, + /* Change permissions of chip-dependent sysfs attributes */ + if (data->has_features & HAS_TEMP_OFFSET) { + dme1737_chmod_group(dev, &dme1737_temp_offset_group, + S_IRUGO | S_IWUSR); + } + if (data->has_features & HAS_ZONE3) { + dme1737_chmod_group(dev, &dme1737_zone3_chmod_group, + S_IRUGO | S_IWUSR); + } + if (data->has_features & HAS_ZONE_HYST) { + dme1737_chmod_group(dev, &dme1737_zone_hyst_group, S_IRUGO | S_IWUSR); } /* Change permissions of PWM sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_chmod_group); ix++) { - if (data->has_pwm & (1 << ix)) { + if (data->has_features & HAS_PWM(ix)) { dme1737_chmod_group(dev, &dme1737_pwm_chmod_group[ix], S_IRUGO | S_IWUSR); - if (data->type != sch5027 && ix < 3) { + if ((data->has_features & HAS_PWM_MIN) && + ix < 3) { dme1737_chmod_file(dev, - dme1737_pwm_misc_attr[ix], + dme1737_auto_pwm_min_attr[ix], S_IRUGO | S_IWUSR); } } @@ -2005,7 +2095,7 @@ static int dme1737_create_files(struct device *dev) /* Change permissions of pwm[1-3] if in manual mode */ for (ix = 0; ix < 3; ix++) { - if ((data->has_pwm & (1 << ix)) && + if ((data->has_features & HAS_PWM(ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == 1)) { dme1737_chmod_file(dev, dme1737_pwm_chmod_attr[ix], @@ -2052,20 +2142,20 @@ static int dme1737_init_device(struct device *dev) return -EFAULT; } - /* Determine which optional fan and pwm features are enabled/present */ + /* Determine which optional fan and pwm features are enabled (only + * valid for I2C devices) */ if (client) { /* I2C chip */ data->config2 = dme1737_read(data, DME1737_REG_CONFIG2); /* Check if optional fan3 input is enabled */ if (data->config2 & 0x04) { - data->has_fan |= (1 << 2); + data->has_features |= HAS_FAN(2); } /* Fan4 and pwm3 are only available if the client's I2C address * is the default 0x2e. Otherwise the I/Os associated with * these functions are used for addr enable/select. */ if (client->addr == 0x2e) { - data->has_fan |= (1 << 3); - data->has_pwm |= (1 << 2); + data->has_features |= HAS_FAN(3) | HAS_PWM(2); } /* Determine which of the optional fan[5-6] and pwm[5-6] @@ -2077,26 +2167,40 @@ static int dme1737_init_device(struct device *dev) dev_warn(dev, "Failed to query Super-IO for optional " "features.\n"); } - } else { /* ISA chip */ - /* Fan3 and pwm3 are always available. Fan[4-5] and pwm[5-6] - * don't exist in the ISA chip. 
*/ - data->has_fan |= (1 << 2); - data->has_pwm |= (1 << 2); } - /* Fan1, fan2, pwm1, and pwm2 are always present */ - data->has_fan |= 0x03; - data->has_pwm |= 0x03; + /* Fan[1-2] and pwm[1-2] are present in all chips */ + data->has_features |= HAS_FAN(0) | HAS_FAN(1) | HAS_PWM(0) | HAS_PWM(1); + + /* Chip-dependent features */ + switch (data->type) { + case dme1737: + data->has_features |= HAS_TEMP_OFFSET | HAS_VID | HAS_ZONE3 | + HAS_ZONE_HYST | HAS_PWM_MIN; + break; + case sch311x: + data->has_features |= HAS_TEMP_OFFSET | HAS_ZONE3 | + HAS_ZONE_HYST | HAS_PWM_MIN | HAS_FAN(2) | HAS_PWM(2); + break; + case sch5027: + data->has_features |= HAS_ZONE3; + break; + case sch5127: + data->has_features |= HAS_FAN(2) | HAS_PWM(2); + break; + default: + break; + } dev_info(dev, "Optional features: pwm3=%s, pwm5=%s, pwm6=%s, " "fan3=%s, fan4=%s, fan5=%s, fan6=%s.\n", - (data->has_pwm & (1 << 2)) ? "yes" : "no", - (data->has_pwm & (1 << 4)) ? "yes" : "no", - (data->has_pwm & (1 << 5)) ? "yes" : "no", - (data->has_fan & (1 << 2)) ? "yes" : "no", - (data->has_fan & (1 << 3)) ? "yes" : "no", - (data->has_fan & (1 << 4)) ? "yes" : "no", - (data->has_fan & (1 << 5)) ? "yes" : "no"); + (data->has_features & HAS_PWM(2)) ? "yes" : "no", + (data->has_features & HAS_PWM(4)) ? "yes" : "no", + (data->has_features & HAS_PWM(5)) ? "yes" : "no", + (data->has_features & HAS_FAN(2)) ? "yes" : "no", + (data->has_features & HAS_FAN(3)) ? "yes" : "no", + (data->has_features & HAS_FAN(4)) ? "yes" : "no", + (data->has_features & HAS_FAN(5)) ? "yes" : "no"); reg = dme1737_read(data, DME1737_REG_TACH_PWM); /* Inform if fan-to-pwm mapping differs from the default */ @@ -2122,7 +2226,7 @@ static int dme1737_init_device(struct device *dev) for (ix = 0; ix < 3; ix++) { data->pwm_config[ix] = dme1737_read(data, DME1737_REG_PWM_CONFIG(ix)); - if ((data->has_pwm & (1 << ix)) && + if ((data->has_features & HAS_PWM(ix)) && (PWM_EN_FROM_REG(data->pwm_config[ix]) == -1)) { dev_info(dev, "Switching pwm%d to " "manual mode.\n", ix + 1); @@ -2142,7 +2246,7 @@ static int dme1737_init_device(struct device *dev) data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ /* Set VRM */ - if (data->type == dme1737) { + if (data->has_features & HAS_VID) { data->vrm = vid_which_vrm(); } @@ -2163,10 +2267,10 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) dme1737_sio_enter(sio_cip); /* Check device ID - * The DME1737 can return either 0x78 or 0x77 as its device ID. - * The SCH5027 returns 0x89 as its device ID. */ + * We currently know about two kinds of DME1737 and SCH5027. */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); - if (!(reg == 0x77 || reg == 0x78 || reg == 0x89)) { + if (!(reg == DME1737_ID_1 || reg == DME1737_ID_2 || + reg == SCH5027_ID)) { err = -ENODEV; goto exit; } @@ -2185,16 +2289,16 @@ static int dme1737_i2c_get_features(int sio_cip, struct dme1737_data *data) * are enabled and available. Bits [3:2] of registers 0x43-0x46 are set * to '10' if the respective feature is enabled. 
*/ if ((inb(addr + 0x43) & 0x0c) == 0x08) { /* fan6 */ - data->has_fan |= (1 << 5); + data->has_features |= HAS_FAN(5); } if ((inb(addr + 0x44) & 0x0c) == 0x08) { /* pwm6 */ - data->has_pwm |= (1 << 5); + data->has_features |= HAS_PWM(5); } if ((inb(addr + 0x45) & 0x0c) == 0x08) { /* fan5 */ - data->has_fan |= (1 << 4); + data->has_features |= HAS_FAN(4); } if ((inb(addr + 0x46) & 0x0c) == 0x08) { /* pwm5 */ - data->has_pwm |= (1 << 4); + data->has_features |= HAS_PWM(4); } exit: @@ -2222,7 +2326,6 @@ static int dme1737_i2c_detect(struct i2c_client *client, if (company == DME1737_COMPANY_SMSC && verstep == SCH5027_VERSTEP) { name = "sch5027"; - } else if (company == DME1737_COMPANY_SMSC && (verstep & DME1737_VERSTEP_MASK) == DME1737_VERSTEP) { name = "dme1737"; @@ -2329,10 +2432,10 @@ static int __init dme1737_isa_detect(int sio_cip, unsigned short *addr) dme1737_sio_enter(sio_cip); /* Check device ID - * We currently know about SCH3112 (0x7c), SCH3114 (0x7d), and - * SCH3116 (0x7f). */ + * We currently know about SCH3112, SCH3114, SCH3116, and SCH5127 */ reg = force_id ? force_id : dme1737_sio_inb(sio_cip, 0x20); - if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { + if (!(reg == SCH3112_ID || reg == SCH3114_ID || reg == SCH3116_ID || + reg == SCH5127_ID)) { err = -ENODEV; goto exit; } @@ -2424,23 +2527,42 @@ static int __devinit dme1737_isa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); /* Skip chip detection if module is loaded with force_id parameter */ - if (!force_id) { + switch (force_id) { + case SCH3112_ID: + case SCH3114_ID: + case SCH3116_ID: + data->type = sch311x; + break; + case SCH5127_ID: + data->type = sch5127; + break; + default: company = dme1737_read(data, DME1737_REG_COMPANY); device = dme1737_read(data, DME1737_REG_DEVICE); - if (!((company == DME1737_COMPANY_SMSC) && - (device == SCH311X_DEVICE))) { + if ((company == DME1737_COMPANY_SMSC) && + (device == SCH311X_DEVICE)) { + data->type = sch311x; + } else if ((company == DME1737_COMPANY_SMSC) && + (device == SCH5127_DEVICE)) { + data->type = sch5127; + } else { err = -ENODEV; goto exit_kfree; } } - data->type = sch311x; - /* Fill in the remaining client fields and initialize the mutex */ - data->name = "sch311x"; + if (data->type == sch5127) { + data->name = "sch5127"; + } else { + data->name = "sch311x"; + } + + /* Initialize the mutex */ mutex_init(&data->update_lock); - dev_info(dev, "Found a SCH311x chip at 0x%04x\n", data->addr); + dev_info(dev, "Found a %s chip at 0x%04x\n", + data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr); /* Initialize the chip */ if ((err = dme1737_init_device(dev))) { diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c new file mode 100644 index 000000000000..0e4b5642638d --- /dev/null +++ b/drivers/hwmon/emc1403.c @@ -0,0 +1,344 @@ +/* + * emc1403.c - SMSC Thermal Driver + * + * Copyright (C) 2008 Intel Corp + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * TODO + * - cache alarm and critical limit registers + * - add emc1404 support + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/sysfs.h> +#include <linux/mutex.h> + +#define THERMAL_PID_REG 0xfd +#define THERMAL_SMSC_ID_REG 0xfe +#define THERMAL_REVISION_REG 0xff + +struct thermal_data { + struct device *hwmon_dev; + struct mutex mutex; + /* Cache the hyst value so we don't keep re-reading it. In theory + we could cache it forever as nobody else should be writing it. */ + u8 cached_hyst; + unsigned long hyst_valid; +}; + +static ssize_t show_temp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval = i2c_smbus_read_byte_data(client, sda->index); + + if (retval < 0) + return retval; + return sprintf(buf, "%d000\n", retval); +} + +static ssize_t show_bit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr); + int retval = i2c_smbus_read_byte_data(client, sda->nr); + + if (retval < 0) + return retval; + retval &= sda->index; + return sprintf(buf, "%d\n", retval ? 1 : 0); +} + +static ssize_t store_temp(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct i2c_client *client = to_i2c_client(dev); + unsigned long val; + int retval; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + retval = i2c_smbus_write_byte_data(client, sda->index, + DIV_ROUND_CLOSEST(val, 1000)); + if (retval < 0) + return retval; + return count; +} + +static ssize_t show_hyst(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct thermal_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval; + int hyst; + + retval = i2c_smbus_read_byte_data(client, sda->index); + if (retval < 0) + return retval; + + if (time_after(jiffies, data->hyst_valid)) { + hyst = i2c_smbus_read_byte_data(client, 0x21); + if (hyst < 0) + return retval; + data->cached_hyst = hyst; + data->hyst_valid = jiffies + HZ; + } + return sprintf(buf, "%d000\n", retval - data->cached_hyst); +} + +static ssize_t store_hyst(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct thermal_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + int retval; + int hyst; + unsigned long val; + + if (strict_strtoul(buf, 10, &val)) + return -EINVAL; + + mutex_lock(&data->mutex); + retval = i2c_smbus_read_byte_data(client, sda->index); + if (retval < 0) + goto fail; + + hyst = val - retval * 1000; + hyst = DIV_ROUND_CLOSEST(hyst, 1000); + if (hyst < 0 || hyst > 255) { + retval = -ERANGE; + goto fail; + } + + retval = i2c_smbus_write_byte_data(client, 0x21, hyst); + if (retval == 0) { + retval = count; + data->cached_hyst = hyst; + data->hyst_valid = jiffies + HZ; + } +fail: + mutex_unlock(&data->mutex); +
return retval; +} + +/* + * Sensors. We pass the actual i2c register to the methods. + */ + +static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x06); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x05); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x20); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0x00); +static SENSOR_DEVICE_ATTR_2(temp1_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x01); +static SENSOR_DEVICE_ATTR_2(temp1_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x01); +static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x01); +static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x20); + +static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x08); +static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x07); +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x19); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0x01); +static SENSOR_DEVICE_ATTR_2(temp2_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x02); +static SENSOR_DEVICE_ATTR_2(temp2_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x02); +static SENSOR_DEVICE_ATTR_2(temp2_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x02); +static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x19); + +static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x16); +static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x15); +static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO | S_IWUSR, + show_temp, store_temp, 0x1A); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 0x23); +static SENSOR_DEVICE_ATTR_2(temp3_min_alarm, S_IRUGO, + show_bit, NULL, 0x36, 0x04); +static SENSOR_DEVICE_ATTR_2(temp3_max_alarm, S_IRUGO, + show_bit, NULL, 0x35, 0x04); +static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO, + show_bit, NULL, 0x37, 0x04); +static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR, + show_hyst, store_hyst, 0x1A); + +static struct attribute *mid_att_thermal[] = { + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp2_crit.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp3_crit.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, + NULL +}; + +static const struct attribute_group m_thermal_gr = { + .attrs = mid_att_thermal +}; + +static int emc1403_detect(struct i2c_client *client, + struct i2c_board_info 
*info) +{ + int id; + /* Check if thermal chip is SMSC and EMC1403 */ + + id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG); + if (id != 0x5d) + return -ENODEV; + + /* Note: 0x25 is the 1404 which is very similar and this + driver could be extended */ + id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG); + if (id != 0x21) + return -ENODEV; + + id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); + if (id != 0x01) + return -ENODEV; + + strlcpy(info->type, "emc1403", I2C_NAME_SIZE); + return 0; +} + +static int emc1403_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int res; + struct thermal_data *data; + + data = kzalloc(sizeof(struct thermal_data), GFP_KERNEL); + if (data == NULL) { + dev_warn(&client->dev, "out of memory"); + return -ENOMEM; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->mutex); + data->hyst_valid = jiffies - 1; /* Expired */ + + res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); + if (res) { + dev_warn(&client->dev, "create group failed\n"); + hwmon_device_unregister(data->hwmon_dev); + goto thermal_error1; + } + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + res = PTR_ERR(data->hwmon_dev); + dev_warn(&client->dev, "register hwmon dev failed\n"); + goto thermal_error2; + } + dev_info(&client->dev, "EMC1403 Thermal chip found\n"); + return res; + +thermal_error2: + sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); +thermal_error1: + kfree(data); + return res; +} + +static int emc1403_remove(struct i2c_client *client) +{ + struct thermal_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &m_thermal_gr); + kfree(data); + return 0; +} + +static const unsigned short emc1403_address_list[] = { + 0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END +}; + +static const struct i2c_device_id emc1403_idtable[] = { + { "emc1403", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, emc1403_idtable); + +static struct i2c_driver sensor_emc1403 = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "emc1403", + }, + .detect = emc1403_detect, + .probe = emc1403_probe, + .remove = emc1403_remove, + .id_table = emc1403_idtable, + .address_list = emc1403_address_list, +}; + +static int __init sensor_emc1403_init(void) +{ + return i2c_add_driver(&sensor_emc1403); +} + +static void __exit sensor_emc1403_exit(void) +{ + i2c_del_driver(&sensor_emc1403); +} + +module_init(sensor_emc1403_init); +module_exit(sensor_emc1403_exit); + +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); +MODULE_DESCRIPTION("emc1403 Thermal Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ ... @@ static u16 f71882fg_read16(struct f71882fg_data *data, u8 reg) { u16 val; - outb(reg++, data->addr + ADDR_REG_OFFSET); - val = inb(data->addr + DATA_REG_OFFSET) << 8; - outb(reg, data->addr + ADDR_REG_OFFSET); - val |= inb(data->addr + DATA_REG_OFFSET); + val = f71882fg_read8(data, reg) << 8; + val |= f71882fg_read8(data, reg + 1); return val; } @@ -921,10 +917,8 @@ static void f71882fg_write8(struct f71882fg_data *data, u8 reg, u8 val) static void f71882fg_write16(struct f71882fg_data *data, u8 reg, u16 val) { - outb(reg++, data->addr + ADDR_REG_OFFSET); - outb(val >> 8, data->addr + DATA_REG_OFFSET); - outb(reg, data->addr + ADDR_REG_OFFSET); - outb(val & 255, data->addr + DATA_REG_OFFSET); + f71882fg_write8(data, reg, val >> 8); + f71882fg_write8(data, reg + 1, val & 0xff); } static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) @@ -945,7 +939,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) mutex_lock(&data->update_lock); /* Update once every 60 seconds */ - if ( time_after(jiffies, data->last_limits + 60 * HZ ) || + if
(time_after(jiffies, data->last_limits + 60 * HZ) || !data->valid) { if (data->type == f71882fg || data->type == f71889fg) { data->in1_max = @@ -1127,8 +1121,12 @@ static ssize_t store_fan_full_speed(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; val = SENSORS_LIMIT(val, 23, 1500000); val = fan_to_reg(val); @@ -1157,8 +1155,12 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); @@ -1206,7 +1208,14 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - long val = simple_strtol(buf, NULL, 10) / 8; + int err; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 8; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1233,8 +1242,12 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP); @@ -1299,8 +1312,14 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1333,10 +1352,16 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; ssize_t ret = count; u8 reg; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; mutex_lock(&data->update_lock); @@ -1372,8 +1397,14 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10) / 1000; + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; val = SENSORS_LIMIT(val, 0, 255); 
mutex_lock(&data->update_lock); @@ -1427,8 +1458,12 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); @@ -1490,8 +1525,13 @@ static ssize_t store_pwm(struct device *dev, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1551,8 +1591,12 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; /* Special case for F8000 pwm channel 3 which only does auto mode */ if (data->type == f8000 && nr == 2 && val != 2) @@ -1626,9 +1670,14 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int pwm = to_sensor_dev_attr_2(devattr)->index; + int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10); + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); @@ -1674,10 +1723,16 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; + int err, nr = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10) / 1000; u8 reg; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; mutex_lock(&data->update_lock); data->pwm_auto_point_temp[nr][point] = @@ -1716,8 +1771,12 @@ static ssize_t store_pwm_interpolate(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - unsigned long val = simple_strtoul(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err) + return err; mutex_lock(&data->update_lock); data->pwm_auto_point_mapping[nr] = @@ -1752,8 +1811,12 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev, const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr = to_sensor_dev_attr_2(devattr)->index; - long val = simple_strtol(buf, NULL, 10); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; switch (val) { case 1: @@ -1798,9 +1861,15 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, 
const char *buf, size_t count) { struct f71882fg_data *data = dev_get_drvdata(dev); - int pwm = to_sensor_dev_attr_2(devattr)->index; + int err, pwm = to_sensor_dev_attr_2(devattr)->index; int point = to_sensor_dev_attr_2(devattr)->nr; - long val = simple_strtol(buf, NULL, 10) / 1000; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val /= 1000; if (data->type == f71889fg) val = SENSORS_LIMIT(val, -128, 127); @@ -2109,6 +2178,13 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, int err = -ENODEV; u16 devid; + /* Don't step on other drivers' I/O space by accident */ + if (!request_region(sioaddr, 2, DRVNAME)) { + printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", + (int)sioaddr); + return -EBUSY; + } + superio_enter(sioaddr); devid = superio_inw(sioaddr, SIO_REG_MANID); @@ -2151,8 +2227,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, } *address = superio_inw(sioaddr, SIO_REG_ADDR); - if (*address == 0) - { + if (*address == 0) { printk(KERN_WARNING DRVNAME ": Base address not set\n"); goto exit; } @@ -2164,6 +2239,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, (int)superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); + release_region(sioaddr, 2); return err; } diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c index bf81aff7051d..776aeb3019d2 100644 --- a/drivers/hwmon/lm63.c +++ b/drivers/hwmon/lm63.c @@ -53,7 +53,7 @@ * Address is fully defined internally and cannot be changed. */ -static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; +static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END }; /* * The LM63 registers @@ -131,12 +131,15 @@ static struct lm63_data *lm63_update_device(struct device *dev); static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info); static void lm63_init_client(struct i2c_client *client); +enum chips { lm63, lm64 }; + /* * Driver data (common to all clients) */ static const struct i2c_device_id lm63_id[] = { - { "lm63", 0 }, + { "lm63", lm63 }, + { "lm64", lm64 }, { } }; MODULE_DEVICE_TABLE(i2c, lm63_id); @@ -422,6 +425,7 @@ static int lm63_detect(struct i2c_client *new_client, struct i2c_adapter *adapter = new_client->adapter; u8 man_id, chip_id, reg_config1, reg_config2; u8 reg_alert_status, reg_alert_mask; + int address = new_client->addr; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; @@ -439,7 +443,6 @@ static int lm63_detect(struct i2c_client *new_client, LM63_REG_ALERT_MASK); if (man_id != 0x01 /* National Semiconductor */ - || chip_id != 0x41 /* LM63 */ || (reg_config1 & 0x18) != 0x00 || (reg_config2 & 0xF8) != 0x00 || (reg_alert_status & 0x20) != 0x00 @@ -450,7 +453,12 @@ static int lm63_detect(struct i2c_client *new_client, return -ENODEV; } - strlcpy(info->type, "lm63", I2C_NAME_SIZE); + if (chip_id == 0x41 && address == 0x4c) + strlcpy(info->type, "lm63", I2C_NAME_SIZE); + else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e)) + strlcpy(info->type, "lm64", I2C_NAME_SIZE); + else + return -ENODEV; return 0; } diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c index 8ae2cfe2d827..56463428a419 100644 --- a/drivers/hwmon/lm75.c +++ b/drivers/hwmon/lm75.c @@ -46,6 +46,7 @@ enum lm75_type { /* keep sorted in alphabetical order */ tcn75, tmp100, tmp101, + tmp105, tmp175, tmp275, tmp75, @@ -220,6 +221,7 @@ static const struct i2c_device_id lm75_ids[] = { { "tcn75", tcn75, }, { "tmp100", tmp100, }, { 
"tmp101", tmp101, }, + { "tmp105", tmp105, }, { "tmp175", tmp175, }, { "tmp275", tmp275, }, { "tmp75", tmp75, }, diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 7cc2708871ab..760ef72eea56 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -982,7 +982,8 @@ static struct lm90_data *lm90_update_device(struct device *dev) mutex_lock(&data->update_lock); - if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) { + if (time_after(jiffies, data->last_updated + HZ / 2 + HZ / 10) + || !data->valid) { u8 h, l; dev_dbg(&client->dev, "Updating lm90 data.\n"); diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c index 65c232a9d0c5..21d201befc2c 100644 --- a/drivers/hwmon/ltc4245.c +++ b/drivers/hwmon/ltc4245.c @@ -45,9 +45,7 @@ enum ltc4245_cmd { LTC4245_VEEIN = 0x19, LTC4245_VEESENSE = 0x1a, LTC4245_VEEOUT = 0x1b, - LTC4245_GPIOADC1 = 0x1c, - LTC4245_GPIOADC2 = 0x1d, - LTC4245_GPIOADC3 = 0x1e, + LTC4245_GPIOADC = 0x1c, }; struct ltc4245_data { @@ -61,7 +59,7 @@ struct ltc4245_data { u8 cregs[0x08]; /* Voltage registers */ - u8 vregs[0x0f]; + u8 vregs[0x0d]; }; static struct ltc4245_data *ltc4245_update_device(struct device *dev) @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev) data->cregs[i] = val; } - /* Read voltage registers -- 0x10 to 0x1f */ + /* Read voltage registers -- 0x10 to 0x1c */ for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { val = i2c_smbus_read_byte_data(client, i+0x10); if (unlikely(val < 0)) @@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg) case LTC4245_VEEOUT: voltage = regval * -55; break; - case LTC4245_GPIOADC1: - case LTC4245_GPIOADC2: - case LTC4245_GPIOADC3: + case LTC4245_GPIOADC: voltage = regval * 10; break; default: @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2); LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); /* GPIO voltages */ -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1); -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2); -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3); +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC); /* Power Consumption (virtual) */ LTC4245_POWER(power1_input, LTC4245_12VSENSE); @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = { &sensor_dev_attr_in8_min_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, - &sensor_dev_attr_in10_input.dev_attr.attr, - &sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, &sensor_dev_attr_power2_input.dev_attr.attr, diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c new file mode 100644 index 000000000000..8013895a1faf --- /dev/null +++ b/drivers/hwmon/tmp102.c @@ -0,0 +1,321 @@ +/* Texas Instruments TMP102 SMBus temperature sensor driver + * + * Copyright (C) 2010 Steven King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "tmp102" + +#define TMP102_TEMP_REG 0x00 +#define TMP102_CONF_REG 0x01 +/* note: these bit definitions are byte swapped */ +#define TMP102_CONF_SD 0x0100 +#define TMP102_CONF_TM 0x0200 +#define TMP102_CONF_POL 0x0400 +#define TMP102_CONF_F0 0x0800 +#define TMP102_CONF_F1 0x1000 +#define TMP102_CONF_R0 0x2000 +#define TMP102_CONF_R1 0x4000 +#define TMP102_CONF_OS 0x8000 +#define TMP102_CONF_EM 0x0010 +#define TMP102_CONF_AL 0x0020 +#define TMP102_CONF_CR0 0x0040 +#define TMP102_CONF_CR1 0x0080 +#define TMP102_TLOW_REG 0x02 +#define TMP102_THIGH_REG 0x03 + +struct tmp102 { + struct device *hwmon_dev; + struct mutex lock; + u16 config_orig; + unsigned long last_update; + int temp[3]; +}; + +/* SMBus specifies low byte first, but the TMP102 returns high byte first, + * so we have to swab16 the values */ +static inline int tmp102_read_reg(struct i2c_client *client, u8 reg) +{ + int result = i2c_smbus_read_word_data(client, reg); + return result < 0 ? result : swab16(result); +} + +static inline int tmp102_write_reg(struct i2c_client *client, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(client, reg, swab16(val)); +} + +/* convert left adjusted 13-bit TMP102 register value to milliCelsius */ +static inline int tmp102_reg_to_mC(s16 val) +{ + return ((val & ~0x01) * 1000) / 128; +} + +/* convert milliCelsius to left adjusted 13-bit TMP102 register value */ +static inline u16 tmp102_mC_to_reg(int val) +{ + return (val * 128) / 1000; +} + +static const u8 tmp102_reg[] = { + TMP102_TEMP_REG, + TMP102_TLOW_REG, + TMP102_THIGH_REG, +}; + +static struct tmp102 *tmp102_update_device(struct i2c_client *client) +{ + struct tmp102 *tmp102 = i2c_get_clientdata(client); + + mutex_lock(&tmp102->lock); + if (time_after(jiffies, tmp102->last_update + HZ / 3)) { + int i; + for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) { + int status = tmp102_read_reg(client, tmp102_reg[i]); + if (status > -1) + tmp102->temp[i] = tmp102_reg_to_mC(status); + } + tmp102->last_update = jiffies; + } + mutex_unlock(&tmp102->lock); + return tmp102; +} + +static ssize_t tmp102_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev)); + + return sprintf(buf, "%d\n", tmp102->temp[sda->index]); +} + +static ssize_t tmp102_set_temp(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); + struct i2c_client *client = to_i2c_client(dev); + struct tmp102 *tmp102 = i2c_get_clientdata(client); + long val; + int status; + + if (strict_strtol(buf, 10, &val) < 0) + return -EINVAL; + val = SENSORS_LIMIT(val, -256000, 255000); + + mutex_lock(&tmp102->lock); + tmp102->temp[sda->index] = val; + status = tmp102_write_reg(client, tmp102_reg[sda->index], + tmp102_mC_to_reg(val)); + mutex_unlock(&tmp102->lock); + return status ? 
: count; +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0); + +static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp, + tmp102_set_temp, 1); + +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp, + tmp102_set_temp, 2); + +static struct attribute *tmp102_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group tmp102_attr_group = { + .attrs = tmp102_attributes, +}; + +#define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1) +#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL) + +static int __devinit tmp102_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct tmp102 *tmp102; + int status; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) { + dev_err(&client->dev, "adapter doesnt support SMBus word " + "transactions\n"); + return -ENODEV; + } + + tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL); + if (!tmp102) { + dev_dbg(&client->dev, "kzalloc failed\n"); + return -ENOMEM; + } + i2c_set_clientdata(client, tmp102); + + status = tmp102_read_reg(client, TMP102_CONF_REG); + if (status < 0) { + dev_err(&client->dev, "error reading config register\n"); + goto fail_free; + } + tmp102->config_orig = status; + status = tmp102_write_reg(client, TMP102_CONF_REG, TMP102_CONFIG); + if (status < 0) { + dev_err(&client->dev, "error writing config register\n"); + goto fail_restore_config; + } + status = tmp102_read_reg(client, TMP102_CONF_REG); + if (status < 0) { + dev_err(&client->dev, "error reading config register\n"); + goto fail_restore_config; + } + status &= ~TMP102_CONFIG_RD_ONLY; + if (status != TMP102_CONFIG) { + dev_err(&client->dev, "config settings did not stick\n"); + status = -ENODEV; + goto fail_restore_config; + } + tmp102->last_update = jiffies - HZ; + mutex_init(&tmp102->lock); + + status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group); + if (status) { + dev_dbg(&client->dev, "could not create sysfs files\n"); + goto fail_restore_config; + } + tmp102->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(tmp102->hwmon_dev)) { + dev_dbg(&client->dev, "unable to register hwmon device\n"); + status = PTR_ERR(tmp102->hwmon_dev); + goto fail_remove_sysfs; + } + + dev_info(&client->dev, "initialized\n"); + + return 0; + +fail_remove_sysfs: + sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); +fail_restore_config: + tmp102_write_reg(client, TMP102_CONF_REG, tmp102->config_orig); +fail_free: + i2c_set_clientdata(client, NULL); + kfree(tmp102); + + return status; +} + +static int __devexit tmp102_remove(struct i2c_client *client) +{ + struct tmp102 *tmp102 = i2c_get_clientdata(client); + + hwmon_device_unregister(tmp102->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); + + /* Stop monitoring if device was stopped originally */ + if (tmp102->config_orig & TMP102_CONF_SD) { + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config >= 0) + tmp102_write_reg(client, TMP102_CONF_REG, + config | TMP102_CONF_SD); + } + + i2c_set_clientdata(client, NULL); + kfree(tmp102); + + return 0; +} + +#ifdef CONFIG_PM +static int tmp102_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config < 0) + return config; + + 
config |= TMP102_CONF_SD; + return tmp102_write_reg(client, TMP102_CONF_REG, config); +} + +static int tmp102_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + int config; + + config = tmp102_read_reg(client, TMP102_CONF_REG); + if (config < 0) + return config; + + config &= ~TMP102_CONF_SD; + return tmp102_write_reg(client, TMP102_CONF_REG, config); +} + +static const struct dev_pm_ops tmp102_dev_pm_ops = { + .suspend = tmp102_suspend, + .resume = tmp102_resume, +}; + +#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) +#else +#define TMP102_DEV_PM_OPS NULL +#endif /* CONFIG_PM */ + +static const struct i2c_device_id tmp102_id[] = { + { "tmp102", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tmp102_id); + +static struct i2c_driver tmp102_driver = { + .driver.name = DRIVER_NAME, + .driver.pm = TMP102_DEV_PM_OPS, + .probe = tmp102_probe, + .remove = __devexit_p(tmp102_remove), + .id_table = tmp102_id, +}; + +static int __init tmp102_init(void) +{ + return i2c_add_driver(&tmp102_driver); +} +module_init(tmp102_init); + +static void __exit tmp102_exit(void) +{ + i2c_del_driver(&tmp102_driver); +} +module_exit(tmp102_exit); + +MODULE_AUTHOR("Steven King "); +MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index d14a1af9f550..ad8d535235c5 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -91,17 +91,6 @@ static const u8 TMP411_TEMP_HIGHEST_LSB[2] = { 0x33, 0x37 }; #define TMP401_DEVICE_ID 0x11 #define TMP411_DEVICE_ID 0x12 -/* - * Functions declarations - */ - -static int tmp401_probe(struct i2c_client *client, - const struct i2c_device_id *id); -static int tmp401_detect(struct i2c_client *client, - struct i2c_board_info *info); -static int tmp401_remove(struct i2c_client *client); -static struct tmp401_data *tmp401_update_device(struct device *dev); - /* * Driver data (common to all clients) */ @@ -113,18 +102,6 @@ static const struct i2c_device_id tmp401_id[] = { }; MODULE_DEVICE_TABLE(i2c, tmp401_id); -static struct i2c_driver tmp401_driver = { - .class = I2C_CLASS_HWMON, - .driver = { - .name = "tmp401", - }, - .probe = tmp401_probe, - .remove = tmp401_remove, - .id_table = tmp401_id, - .detect = tmp401_detect, - .address_list = normal_i2c, -}; - /* * Client data (each client gets its own) */ @@ -194,6 +171,71 @@ static u8 tmp401_crit_temp_to_register(long temp, u8 config) return (temp + 500) / 1000; } +static struct tmp401_data *tmp401_update_device_reg16( + struct i2c_client *client, struct tmp401_data *data) +{ + int i; + + for (i = 0; i < 2; i++) { + /* + * High byte must be read first immediately followed + * by the low byte + */ + data->temp[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_MSB[i]) << 8; + data->temp[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_LSB[i]); + data->temp_low[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8; + data->temp_low[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_LOW_LIMIT_LSB[i]); + data->temp_high[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8; + data->temp_high[i] |= i2c_smbus_read_byte_data(client, + TMP401_TEMP_HIGH_LIMIT_LSB[i]); + data->temp_crit[i] = i2c_smbus_read_byte_data(client, + TMP401_TEMP_CRIT_LIMIT[i]); + + if (data->kind == tmp411) { + data->temp_lowest[i] = i2c_smbus_read_byte_data(client, + TMP411_TEMP_LOWEST_MSB[i]) << 8; + data->temp_lowest[i] |= i2c_smbus_read_byte_data( + client, 
TMP411_TEMP_LOWEST_LSB[i]); + + data->temp_highest[i] = i2c_smbus_read_byte_data( + client, TMP411_TEMP_HIGHEST_MSB[i]) << 8; + data->temp_highest[i] |= i2c_smbus_read_byte_data( + client, TMP411_TEMP_HIGHEST_LSB[i]); + } + } + return data; +} + +static struct tmp401_data *tmp401_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct tmp401_data *data = i2c_get_clientdata(client); + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS); + data->config = i2c_smbus_read_byte_data(client, + TMP401_CONFIG_READ); + tmp401_update_device_reg16(client, data); + + data->temp_crit_hyst = i2c_smbus_read_byte_data(client, + TMP401_TEMP_CRIT_HYST); + + data->last_updated = jiffies; + data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + static ssize_t show_temp_value(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -420,30 +462,36 @@ static ssize_t reset_temp_history(struct device *dev, } static struct sensor_device_attribute tmp401_attr[] = { - SENSOR_ATTR(temp1_input, 0444, show_temp_value, NULL, 0), - SENSOR_ATTR(temp1_min, 0644, show_temp_min, store_temp_min, 0), - SENSOR_ATTR(temp1_max, 0644, show_temp_max, store_temp_max, 0), - SENSOR_ATTR(temp1_crit, 0644, show_temp_crit, store_temp_crit, 0), - SENSOR_ATTR(temp1_crit_hyst, 0644, show_temp_crit_hyst, + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp_value, NULL, 0), + SENSOR_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min, + store_temp_min, 0), + SENSOR_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, + store_temp_max, 0), + SENSOR_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit, + store_temp_crit, 0), + SENSOR_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_crit_hyst, store_temp_crit_hyst, 0), - SENSOR_ATTR(temp1_min_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_min_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_LOW), - SENSOR_ATTR(temp1_max_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_max_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_HIGH), - SENSOR_ATTR(temp1_crit_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp1_crit_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_LOCAL_CRIT), - SENSOR_ATTR(temp2_input, 0444, show_temp_value, NULL, 1), - SENSOR_ATTR(temp2_min, 0644, show_temp_min, store_temp_min, 1), - SENSOR_ATTR(temp2_max, 0644, show_temp_max, store_temp_max, 1), - SENSOR_ATTR(temp2_crit, 0644, show_temp_crit, store_temp_crit, 1), - SENSOR_ATTR(temp2_crit_hyst, 0444, show_temp_crit_hyst, NULL, 1), - SENSOR_ATTR(temp2_fault, 0444, show_status, NULL, + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp_value, NULL, 1), + SENSOR_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min, + store_temp_min, 1), + SENSOR_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, + store_temp_max, 1), + SENSOR_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit, + store_temp_crit, 1), + SENSOR_ATTR(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 1), + SENSOR_ATTR(temp2_fault, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_OPEN), - SENSOR_ATTR(temp2_min_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_min_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_LOW), - SENSOR_ATTR(temp2_max_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_max_alarm, S_IRUGO, show_status, NULL, TMP401_STATUS_REMOTE_HIGH), - SENSOR_ATTR(temp2_crit_alarm, 0444, show_status, NULL, + SENSOR_ATTR(temp2_crit_alarm, S_IRUGO, show_status, NULL, 
TMP401_STATUS_REMOTE_CRIT), }; @@ -455,11 +503,11 @@ static struct sensor_device_attribute tmp401_attr[] = { * and remote channels. */ static struct sensor_device_attribute tmp411_attr[] = { - SENSOR_ATTR(temp1_highest, 0444, show_temp_highest, NULL, 0), - SENSOR_ATTR(temp1_lowest, 0444, show_temp_lowest, NULL, 0), - SENSOR_ATTR(temp2_highest, 0444, show_temp_highest, NULL, 1), - SENSOR_ATTR(temp2_lowest, 0444, show_temp_lowest, NULL, 1), - SENSOR_ATTR(temp_reset_history, 0200, NULL, reset_temp_history, 0), + SENSOR_ATTR(temp1_highest, S_IRUGO, show_temp_highest, NULL, 0), + SENSOR_ATTR(temp1_lowest, S_IRUGO, show_temp_lowest, NULL, 0), + SENSOR_ATTR(temp2_highest, S_IRUGO, show_temp_highest, NULL, 1), + SENSOR_ATTR(temp2_lowest, S_IRUGO, show_temp_lowest, NULL, 1), + SENSOR_ATTR(temp_reset_history, S_IWUSR, NULL, reset_temp_history, 0), }; /* @@ -529,6 +577,27 @@ static int tmp401_detect(struct i2c_client *client, return 0; } +static int tmp401_remove(struct i2c_client *client) +{ + struct tmp401_data *data = i2c_get_clientdata(client); + int i; + + if (data->hwmon_dev) + hwmon_device_unregister(data->hwmon_dev); + + for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) + device_remove_file(&client->dev, &tmp401_attr[i].dev_attr); + + if (data->kind == tmp411) { + for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) + device_remove_file(&client->dev, + &tmp411_attr[i].dev_attr); + } + + kfree(data); + return 0; +} + static int tmp401_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -581,91 +650,17 @@ exit_remove: return err; } -static int tmp401_remove(struct i2c_client *client) -{ - struct tmp401_data *data = i2c_get_clientdata(client); - int i; - - if (data->hwmon_dev) - hwmon_device_unregister(data->hwmon_dev); - - for (i = 0; i < ARRAY_SIZE(tmp401_attr); i++) - device_remove_file(&client->dev, &tmp401_attr[i].dev_attr); - - if (data->kind == tmp411) { - for (i = 0; i < ARRAY_SIZE(tmp411_attr); i++) - device_remove_file(&client->dev, - &tmp411_attr[i].dev_attr); - } - - kfree(data); - return 0; -} - -static struct tmp401_data *tmp401_update_device_reg16( - struct i2c_client *client, struct tmp401_data *data) -{ - int i; - - for (i = 0; i < 2; i++) { - /* - * High byte must be read first immediately followed - * by the low byte - */ - data->temp[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_MSB[i]) << 8; - data->temp[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_LSB[i]); - data->temp_low[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_LOW_LIMIT_MSB_READ[i]) << 8; - data->temp_low[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_LOW_LIMIT_LSB[i]); - data->temp_high[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_HIGH_LIMIT_MSB_READ[i]) << 8; - data->temp_high[i] |= i2c_smbus_read_byte_data(client, - TMP401_TEMP_HIGH_LIMIT_LSB[i]); - data->temp_crit[i] = i2c_smbus_read_byte_data(client, - TMP401_TEMP_CRIT_LIMIT[i]); - - if (data->kind == tmp411) { - data->temp_lowest[i] = i2c_smbus_read_byte_data(client, - TMP411_TEMP_LOWEST_MSB[i]) << 8; - data->temp_lowest[i] |= i2c_smbus_read_byte_data( - client, TMP411_TEMP_LOWEST_LSB[i]); - - data->temp_highest[i] = i2c_smbus_read_byte_data( - client, TMP411_TEMP_HIGHEST_MSB[i]) << 8; - data->temp_highest[i] |= i2c_smbus_read_byte_data( - client, TMP411_TEMP_HIGHEST_LSB[i]); - } - } - return data; -} - -static struct tmp401_data *tmp401_update_device(struct device *dev) -{ - struct i2c_client *client = to_i2c_client(dev); - struct tmp401_data *data = i2c_get_clientdata(client); - - mutex_lock(&data->update_lock); - - if 
(time_after(jiffies, data->last_updated + HZ) || !data->valid) { - data->status = i2c_smbus_read_byte_data(client, TMP401_STATUS); - data->config = i2c_smbus_read_byte_data(client, - TMP401_CONFIG_READ); - tmp401_update_device_reg16(client, data); - - data->temp_crit_hyst = i2c_smbus_read_byte_data(client, - TMP401_TEMP_CRIT_HYST); - - data->last_updated = jiffies; - data->valid = 1; - } - - mutex_unlock(&data->update_lock); - - return data; -} +static struct i2c_driver tmp401_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "tmp401", + }, + .probe = tmp401_probe, + .remove = tmp401_remove, + .id_table = tmp401_id, + .detect = tmp401_detect, + .address_list = normal_i2c, +}; static int __init tmp401_init(void) { diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c index b9e517de6a82..3feaa26410be 100644 --- a/drivers/ide/gayle.c +++ b/drivers/ide/gayle.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -23,15 +24,6 @@ #include - /* - * Bases of the IDE interfaces - */ - -#define GAYLE_BASE_4000 0xdd2020 /* A4000/A4000T */ -#define GAYLE_BASE_1200 0xda0000 /* A1200/A600 and E-Matrix 530 */ - -#define GAYLE_IDEREG_SIZE 0x2000 - /* * Offsets from one of the above bases */ @@ -68,20 +60,20 @@ MODULE_PARM_DESC(doubler, "enable support for IDE doublers"); static int gayle_test_irq(ide_hwif_t *hwif) { - unsigned char ch; + unsigned char ch; - ch = z_readb(hwif->io_ports.irq_addr); - if (!(ch & GAYLE_IRQ_IDE)) - return 0; - return 1; + ch = z_readb(hwif->io_ports.irq_addr); + if (!(ch & GAYLE_IRQ_IDE)) + return 0; + return 1; } static void gayle_a1200_clear_irq(ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; + ide_hwif_t *hwif = drive->hwif; - (void)z_readb(hwif->io_ports.status_addr); - z_writeb(0x7c, hwif->io_ports.irq_addr); + (void)z_readb(hwif->io_ports.status_addr); + z_writeb(0x7c, hwif->io_ports.irq_addr); } static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, @@ -122,64 +114,89 @@ static const struct ide_port_info gayle_port_info = { * Probe for a Gayle IDE interface (and optionally for an IDE doubler) */ -static int __init gayle_init(void) +static int __init amiga_gayle_ide_probe(struct platform_device *pdev) { - unsigned long phys_base, res_start, res_n; - unsigned long base, ctrlport, irqport; - int a4000, i, rc; - struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; - struct ide_port_info d = gayle_port_info; - - if (!MACH_IS_AMIGA) - return -ENODEV; - - if ((a4000 = AMIGAHW_PRESENT(A4000_IDE)) || AMIGAHW_PRESENT(A1200_IDE)) - goto found; - -#ifdef CONFIG_ZORRO - if (zorro_find_device(ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE, - NULL)) - goto found; -#endif - return -ENODEV; - -found: - printk(KERN_INFO "ide: Gayle IDE controller (A%d style%s)\n", - a4000 ? 4000 : 1200, - ide_doubler ? 
", IDE doubler" : ""); - - if (a4000) { - phys_base = GAYLE_BASE_4000; - irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_4000); - d.port_ops = &gayle_a4000_port_ops; - } else { - phys_base = GAYLE_BASE_1200; - irqport = (unsigned long)ZTWO_VADDR(GAYLE_IRQ_1200); - d.port_ops = &gayle_a1200_port_ops; + struct resource *res; + struct gayle_ide_platform_data *pdata; + unsigned long base, ctrlport, irqport; + unsigned int i; + int error; + struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; + struct ide_port_info d = gayle_port_info; + struct ide_host *host; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), "IDE")) + return -EBUSY; + + pdata = pdev->dev.platform_data; + pr_info("ide: Gayle IDE controller (A%u style%s)\n", + pdata->explicit_ack ? 1200 : 4000, + ide_doubler ? ", IDE doubler" : ""); + + base = (unsigned long)ZTWO_VADDR(pdata->base); + ctrlport = 0; + irqport = (unsigned long)ZTWO_VADDR(pdata->irqport); + if (pdata->explicit_ack) + d.port_ops = &gayle_a1200_port_ops; + else + d.port_ops = &gayle_a4000_port_ops; + + for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++, base += GAYLE_NEXT_PORT) { + if (GAYLE_HAS_CONTROL_REG) + ctrlport = base + GAYLE_CONTROL; + + gayle_setup_ports(&hw[i], base, ctrlport, irqport); + hws[i] = &hw[i]; } - res_start = ((unsigned long)phys_base) & ~(GAYLE_NEXT_PORT-1); - res_n = GAYLE_IDEREG_SIZE; + error = ide_host_add(&d, hws, i, &host); + if (error) + goto out; - if (!request_mem_region(res_start, res_n, "IDE")) - return -EBUSY; + platform_set_drvdata(pdev, host); + return 0; - for (i = 0; i < GAYLE_NUM_PROBE_HWIFS; i++) { - base = (unsigned long)ZTWO_VADDR(phys_base + i * GAYLE_NEXT_PORT); - ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0; +out: + release_mem_region(res->start, resource_size(res)); + return error; +} + +static int __exit amiga_gayle_ide_remove(struct platform_device *pdev) +{ + struct ide_host *host = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + ide_host_remove(host); + release_mem_region(res->start, resource_size(res)); + return 0; +} - gayle_setup_ports(&hw[i], base, ctrlport, irqport); +static struct platform_driver amiga_gayle_ide_driver = { + .remove = __exit_p(amiga_gayle_ide_remove), + .driver = { + .name = "amiga-gayle-ide", + .owner = THIS_MODULE, + }, +}; - hws[i] = &hw[i]; - } +static int __init amiga_gayle_ide_init(void) +{ + return platform_driver_probe(&amiga_gayle_ide_driver, + amiga_gayle_ide_probe); +} - rc = ide_host_add(&d, hws, i, NULL); - if (rc) - release_mem_region(res_start, res_n); +module_init(amiga_gayle_ide_init); - return rc; +static void __exit amiga_gayle_ide_exit(void) +{ + platform_driver_unregister(&amiga_gayle_ide_driver); } -module_init(gayle_init); +module_exit(amiga_gayle_ide_exit); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-gayle-ide"); diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig index f15e90a453d1..fb5c5186d4aa 100644 --- a/drivers/idle/Kconfig +++ b/drivers/idle/Kconfig @@ -1,3 +1,14 @@ +config INTEL_IDLE + tristate "Cpuidle Driver for Intel Processors" + depends on CPU_IDLE + depends on X86 + depends on CPU_SUP_INTEL + depends on EXPERIMENTAL + help + Enable intel_idle, a cpuidle driver that includes knowledge of + native Intel hardware idle features. The acpi_idle driver + can be configured at the same time, in order to handle + processors intel_idle does not support. 
menu "Memory power savings" depends on X86_64 diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile index 5f68fc377e21..23d295cf10f2 100644 --- a/drivers/idle/Makefile +++ b/drivers/idle/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_I7300_IDLE) += i7300_idle.o +obj-$(CONFIG_INTEL_IDLE) += intel_idle.o diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c new file mode 100755 index 000000000000..54f0fb4cd5d2 --- /dev/null +++ b/drivers/idle/intel_idle.c @@ -0,0 +1,461 @@ +/* + * intel_idle.c - native hardware idle loop for modern Intel processors + * + * Copyright (c) 2010, Intel Corporation. + * Len Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* + * intel_idle is a cpuidle driver that loads on specific Intel processors + * in lieu of the legacy ACPI processor_idle driver. The intent is to + * make Linux more efficient on these processors, as intel_idle knows + * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. + */ + +/* + * Design Assumptions + * + * All CPUs have same idle states as boot CPU + * + * Chipset BM_STS (bus master status) bit is a NOP + * for preventing entry into deep C-stats + */ + +/* + * Known limitations + * + * The driver currently initializes for_each_online_cpu() upon modprobe. + * It it unaware of subsequent processors hot-added to the system. + * This means that if you boot with maxcpus=n and later online + * processors above n, those processors will use C1 only. + * + * ACPI has a .suspend hack to turn off deep c-statees during suspend + * to avoid complications with the lapic timer workaround. + * Have not seen issues with suspend, but may need same workaround here. + * + * There is currently no kernel-based automatic probing/loading mechanism + * if the driver is built as a module. + */ + +/* un-comment DEBUG to enable pr_debug() statements */ +#define DEBUG + +#include +#include +#include +#include /* ktime_get_real() */ +#include +#include + +#define INTEL_IDLE_VERSION "0.4" +#define PREFIX "intel_idle: " + +#define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_CSTATE_MASK (0xf) +#define MWAIT_SUBSTATE_SIZE (4) +#define MWAIT_MAX_NUM_CSTATES 8 +#define CPUID_MWAIT_LEAF (5) +#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) +#define CPUID5_ECX_INTERRUPT_BREAK (0x2) + +static struct cpuidle_driver intel_idle_driver = { + .name = "intel_idle", + .owner = THIS_MODULE, +}; +/* intel_idle.max_cstate=0 disables driver */ +static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; +static int power_policy = 7; /* 0 = max perf; 15 = max powersave */ + +static unsigned int substates; +static int (*choose_substate)(int); + +/* Reliable LAPIC Timer States, bit 1 for C1 etc. 
*/ +static unsigned int lapic_timer_reliable_states; + +static struct cpuidle_device *intel_idle_cpuidle_devices; +static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); + +static struct cpuidle_state *cpuidle_state_table; + +/* + * States are indexed by the cstate number, + * which is also the index into the MWAIT hint array. + * Thus C0 is a dummy. + */ +static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { + { /* MWAIT C0 */ }, + { /* MWAIT C1 */ + .name = "NHM-C1", + .desc = "MWAIT 0x00", + .driver_data = (void *) 0x00, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 3, + .power_usage = 1000, + .target_residency = 6, + .enter = &intel_idle }, + { /* MWAIT C2 */ + .name = "NHM-C3", + .desc = "MWAIT 0x10", + .driver_data = (void *) 0x10, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 20, + .power_usage = 500, + .target_residency = 80, + .enter = &intel_idle }, + { /* MWAIT C3 */ + .name = "NHM-C6", + .desc = "MWAIT 0x20", + .driver_data = (void *) 0x20, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 200, + .power_usage = 350, + .target_residency = 800, + .enter = &intel_idle }, +}; + +static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { + { /* MWAIT C0 */ }, + { /* MWAIT C1 */ + .name = "ATM-C1", + .desc = "MWAIT 0x00", + .driver_data = (void *) 0x00, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 1, + .power_usage = 1000, + .target_residency = 4, + .enter = &intel_idle }, + { /* MWAIT C2 */ + .name = "ATM-C2", + .desc = "MWAIT 0x10", + .driver_data = (void *) 0x10, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 20, + .power_usage = 500, + .target_residency = 80, + .enter = &intel_idle }, + { /* MWAIT C3 */ }, + { /* MWAIT C4 */ + .name = "ATM-C4", + .desc = "MWAIT 0x30", + .driver_data = (void *) 0x30, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 100, + .power_usage = 250, + .target_residency = 400, + .enter = &intel_idle }, + { /* MWAIT C5 */ }, + { /* MWAIT C6 */ + .name = "ATM-C6", + .desc = "MWAIT 0x40", + .driver_data = (void *) 0x40, + .flags = CPUIDLE_FLAG_TIME_VALID, + .exit_latency = 200, + .power_usage = 150, + .target_residency = 800, + .enter = NULL }, /* disabled */ +}; + +/* + * choose_tunable_substate() + * + * Run-time decision on which C-state substate to invoke + * If power_policy = 0, choose shallowest substate (0) + * If power_policy = 15, choose deepest substate + * If power_policy = middle, choose middle substate etc. 
+ */ +static int choose_tunable_substate(int cstate) +{ + unsigned int num_substates; + unsigned int substate_choice; + + power_policy &= 0xF; /* valid range: 0-15 */ + cstate &= 7; /* valid range: 0-7 */ + + num_substates = (substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; + + if (num_substates <= 1) + return 0; + + substate_choice = ((power_policy + (power_policy + 1) * + (num_substates - 1)) / 16); + + return substate_choice; +} + +/* + * choose_zero_substate() + */ +static int choose_zero_substate(int cstate) +{ + return 0; +} + +/** + * intel_idle + * @dev: cpuidle_device + * @state: cpuidle state + * + */ +static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) +{ + unsigned long ecx = 1; /* break on interrupt flag */ + unsigned long eax = (unsigned long)cpuidle_get_statedata(state); + unsigned int cstate; + ktime_t kt_before, kt_after; + s64 usec_delta; + int cpu = smp_processor_id(); + + cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; + + eax = eax + (choose_substate)(cstate); + + local_irq_disable(); + + if (!(lapic_timer_reliable_states & (1 << (cstate)))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); + + kt_before = ktime_get_real(); + + stop_critical_timings(); +#ifndef MODULE + trace_power_start(POWER_CSTATE, (eax >> 4) + 1); +#endif + if (!need_resched()) { + + __monitor((void *)¤t_thread_info()->flags, 0, 0); + smp_mb(); + if (!need_resched()) + __mwait(eax, ecx); + } + + start_critical_timings(); + + kt_after = ktime_get_real(); + usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); + + local_irq_enable(); + + if (!(lapic_timer_reliable_states & (1 << (cstate)))) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); + + return usec_delta; +} + +/* + * intel_idle_probe() + */ +static int intel_idle_probe(void) +{ + unsigned int eax, ebx, ecx, edx; + + if (max_cstate == 0) { + pr_debug(PREFIX "disabled\n"); + return -EPERM; + } + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_MWAIT)) + return -ENODEV; + + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return -ENODEV; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + return -ENODEV; +#ifdef DEBUG + if (substates == 0) /* can over-ride via modparam */ +#endif + substates = edx; + + pr_debug(PREFIX "MWAIT substates: 0x%x\n", substates); + + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ + lapic_timer_reliable_states = 0xFFFFFFFF; + + if (boot_cpu_data.x86 != 6) /* family 6 */ + return -ENODEV; + + switch (boot_cpu_data.x86_model) { + + case 0x1A: /* Core i7, Xeon 5500 series */ + case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ + case 0x1F: /* Core i7 and i5 Processor - Nehalem */ + case 0x2E: /* Nehalem-EX Xeon */ + lapic_timer_reliable_states = (1 << 1); /* C1 */ + + case 0x25: /* Westmere */ + case 0x2C: /* Westmere */ + cpuidle_state_table = nehalem_cstates; + choose_substate = choose_tunable_substate; + break; + + case 0x1C: /* 28 - Atom Processor */ + lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ + cpuidle_state_table = atom_cstates; + choose_substate = choose_zero_substate; + break; +#ifdef FUTURE_USE + case 0x17: /* 23 - Core 2 Duo */ + lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ +#endif + + default: + pr_debug(PREFIX "does not run on family %d model %d\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + return 
-ENODEV; + } + + pr_debug(PREFIX "v" INTEL_IDLE_VERSION + " model 0x%X\n", boot_cpu_data.x86_model); + + pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", + lapic_timer_reliable_states); + return 0; +} + +/* + * intel_idle_cpuidle_devices_uninit() + * unregister, free cpuidle_devices + */ +static void intel_idle_cpuidle_devices_uninit(void) +{ + int i; + struct cpuidle_device *dev; + + for_each_online_cpu(i) { + dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + cpuidle_unregister_device(dev); + } + + free_percpu(intel_idle_cpuidle_devices); + return; +} +/* + * intel_idle_cpuidle_devices_init() + * allocate, initialize, register cpuidle_devices + */ +static int intel_idle_cpuidle_devices_init(void) +{ + int i, cstate; + struct cpuidle_device *dev; + + intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); + if (intel_idle_cpuidle_devices == NULL) + return -ENOMEM; + + for_each_online_cpu(i) { + dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + + dev->state_count = 1; + + for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { + int num_substates; + + if (cstate > max_cstate) { + printk(PREFIX "max_cstate %d reached\n", + max_cstate); + break; + } + + /* does the state exist in CPUID.MWAIT? */ + num_substates = (substates >> ((cstate) * 4)) + & MWAIT_SUBSTATE_MASK; + if (num_substates == 0) + continue; + /* is the state not enabled? */ + if (cpuidle_state_table[cstate].enter == NULL) { + /* does the driver not know about the state? */ + if (*cpuidle_state_table[cstate].name == '\0') + pr_debug(PREFIX "unaware of model 0x%x" + " MWAIT %d please" + " contact lenb@kernel.org", + boot_cpu_data.x86_model, cstate); + continue; + } + + if ((cstate > 2) && + !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle" + " states deeper than C2"); + + dev->states[dev->state_count] = /* structure copy */ + cpuidle_state_table[cstate]; + + dev->state_count += 1; + } + + dev->cpu = i; + if (cpuidle_register_device(dev)) { + pr_debug(PREFIX "cpuidle_register_device %d failed!\n", + i); + intel_idle_cpuidle_devices_uninit(); + return -EIO; + } + } + + return 0; +} + + +static int __init intel_idle_init(void) +{ + int retval; + + retval = intel_idle_probe(); + if (retval) + return retval; + + retval = cpuidle_register_driver(&intel_idle_driver); + if (retval) { + printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", + cpuidle_get_driver()->name); + return retval; + } + + retval = intel_idle_cpuidle_devices_init(); + if (retval) { + cpuidle_unregister_driver(&intel_idle_driver); + return retval; + } + + return 0; +} + +static void __exit intel_idle_exit(void) +{ + intel_idle_cpuidle_devices_uninit(); + cpuidle_unregister_driver(&intel_idle_driver); + + return; +} + +module_init(intel_idle_init); +module_exit(intel_idle_exit); + +module_param(power_policy, int, 0644); +module_param(max_cstate, int, 0444); +#ifdef DEBUG +module_param(substates, int, 0444); +#endif + +MODULE_AUTHOR("Len Brown "); +MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 9fd4a0d3206e..adaefabc40e9 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -1824,7 +1824,7 @@ static int dv1394_open(struct inode *inode, struct file *file) "and will not be available in the new firewire driver stack. 
" "Try libraw1394 based programs instead.\n", current->comm); - return 0; + return nonseekable_open(inode, file); } @@ -2153,17 +2153,18 @@ static struct cdev dv1394_cdev; static const struct file_operations dv1394_fops= { .owner = THIS_MODULE, - .poll = dv1394_poll, + .poll = dv1394_poll, .unlocked_ioctl = dv1394_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = dv1394_compat_ioctl, #endif .mmap = dv1394_mmap, .open = dv1394_open, - .write = dv1394_write, - .read = dv1394_read, + .write = dv1394_write, + .read = dv1394_read, .release = dv1394_release, - .fasync = dv1394_fasync, + .fasync = dv1394_fasync, + .llseek = no_llseek, }; diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 8aa56ac07e29..b563d5e9fa2e 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -2834,7 +2834,7 @@ static int raw1394_open(struct inode *inode, struct file *file) file->private_data = fi; - return 0; + return nonseekable_open(inode, file); } static int raw1394_release(struct inode *inode, struct file *file) @@ -3035,6 +3035,7 @@ static const struct file_operations raw1394_fops = { .poll = raw1394_poll, .open = raw1394_open, .release = raw1394_release, + .llseek = no_llseek, }; static int __init init_raw1394(void) diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 949064a05675..a42bd6893bcf 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1239,7 +1239,7 @@ static int video1394_open(struct inode *inode, struct file *file) ctx->current_ctx = NULL; file->private_data = ctx; - return 0; + return nonseekable_open(inode, file); } static int video1394_release(struct inode *inode, struct file *file) @@ -1287,7 +1287,8 @@ static const struct file_operations video1394_fops= .poll = video1394_poll, .mmap = video1394_mmap, .open = video1394_open, - .release = video1394_release + .release = video1394_release, + .llseek = no_llseek, }; /*** HOTPLUG STUFF **********************************************************/ diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 46474842cfe9..08f948df8fa9 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -706,14 +706,9 @@ static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len) if (!len) return 0; - data = kmalloc(len, GFP_KERNEL); - if (!data) - return -ENOMEM; - - if (copy_from_user(data, (void __user *)(unsigned long)src, len)) { - kfree(data); - return -EFAULT; - } + data = memdup_user((void __user *)(unsigned long)src, len); + if (IS_ERR(data)) + return PTR_ERR(data); *dest = data; return 0; diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 07cae552cafb..e571e60ecb88 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -847,7 +847,7 @@ static int __cpuinit comp_pool_callback(struct notifier_block *nfb, ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); if (!create_comp_task(pool, cpu)) { ehca_gen_err("Can't create comp_task for cpu: %x", cpu); - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } break; case CPU_UP_CANCELED: diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 755470440ef1..edef8527eb34 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -144,10 +144,11 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; + size_t avail; struct qib_devdata *dd = private2dd(file); - return 
simple_read_from_buffer(buf, count, ppos, counters, - dd->f_read_cntrs(dd, *ppos, NULL, &counters)); + avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); } /* read the per-device counters */ @@ -155,10 +156,11 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; + size_t avail; struct qib_devdata *dd = private2dd(file); - return simple_read_from_buffer(buf, count, ppos, names, - dd->f_read_cntrs(dd, *ppos, &names, NULL)); + avail = dd->f_read_cntrs(dd, *ppos, &names, NULL); + return simple_read_from_buffer(buf, count, ppos, names, avail); } static const struct file_operations cntr_ops[] = { @@ -176,10 +178,11 @@ static ssize_t portnames_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; + size_t avail; struct qib_devdata *dd = private2dd(file); - return simple_read_from_buffer(buf, count, ppos, names, - dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL)); + avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL); + return simple_read_from_buffer(buf, count, ppos, names, avail); } /* read the per-port counters for port 1 (pidx 0) */ @@ -187,10 +190,11 @@ static ssize_t portcntrs_1_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; + size_t avail; struct qib_devdata *dd = private2dd(file); - return simple_read_from_buffer(buf, count, ppos, counters, - dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters)); + avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); } /* read the per-port counters for port 2 (pidx 1) */ @@ -198,10 +202,11 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; + size_t avail; struct qib_devdata *dd = private2dd(file); - return simple_read_from_buffer(buf, count, ppos, counters, - dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters)); + avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters); + return simple_read_from_buffer(buf, count, ppos, counters, avail); } static const struct file_operations portcntr_ops[] = { diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 7b6549fd429b..1eadadc13da8 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -3475,14 +3475,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, struct qib_devdata *dd; int ret; -#ifndef CONFIG_PCI_MSI - qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " - "work if CONFIG_PCI_MSI is not enabled\n", - ent->device); - dd = ERR_PTR(-ENODEV); - goto bail; -#endif - dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) + sizeof(struct qib_chip_specific)); if (IS_ERR(dd)) @@ -3554,10 +3546,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev, if (qib_mini_init) goto bail; -#ifndef CONFIG_PCI_MSI - qib_dev_err(dd, "PCI_MSI not configured, NO interrupts\n"); -#endif - if (qib_pcie_params(dd, 8, NULL, NULL)) qib_dev_err(dd, "Failed to setup PCIe or interrupts; " "continuing anyway\n"); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 2c24eab35b54..503992d9c5ce 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -42,9 +42,6 @@ #include #include #include -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -#include -#endif #include 
"qib.h" #include "qib_7322_regs.h" @@ -114,40 +111,18 @@ static ushort qib_singleport; module_param_named(singleport, qib_singleport, ushort, S_IRUGO); MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); - -/* - * Setup QMH7342 receive and transmit parameters, necessary because - * each bay, Mez connector, and IB port need different tuning, beyond - * what the switch and HCA can do automatically. - * It's expected to be done by cat'ing files to the modules file, - * rather than setting up as a module parameter. - * It's a "write-only" file, returns 0 when read back. - * The unit, port, bay (if given), and values MUST be done as a single write. - * The unit, port, and bay must precede the values to be effective. - */ -static int setup_qmh_params(const char *, struct kernel_param *); -static unsigned dummy_qmh_params; -module_param_call(qmh_serdes_setup, setup_qmh_params, param_get_uint, - &dummy_qmh_params, S_IWUSR | S_IRUGO); - -/* similarly for QME7342, but it's simpler */ -static int setup_qme_params(const char *, struct kernel_param *); -static unsigned dummy_qme_params; -module_param_call(qme_serdes_setup, setup_qme_params, param_get_uint, - &dummy_qme_params, S_IWUSR | S_IRUGO); - #define MAX_ATTEN_LEN 64 /* plenty for any real system */ /* for read back, default index is ~5m copper cable */ -static char cable_atten_list[MAX_ATTEN_LEN] = "10"; -static struct kparam_string kp_cable_atten = { - .string = cable_atten_list, +static char txselect_list[MAX_ATTEN_LEN] = "10"; +static struct kparam_string kp_txselect = { + .string = txselect_list, .maxlen = MAX_ATTEN_LEN }; -static int setup_cable_atten(const char *, struct kernel_param *); -module_param_call(cable_atten, setup_cable_atten, param_get_string, - &kp_cable_atten, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(cable_atten, \ - "cable attenuation indices for cables with invalid EEPROM"); +static int setup_txselect(const char *, struct kernel_param *); +module_param_call(txselect, setup_txselect, param_get_string, + &kp_txselect, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(txselect, \ + "Tx serdes indices (for no QSFP or invalid QSFP data)"); #define BOARD_QME7342 5 #define BOARD_QMH7342 6 @@ -540,12 +515,6 @@ struct qib_chip_specific { u32 lastbuf_for_pio; u32 stay_in_freeze; u32 recovery_ports_initted; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - u32 dca_ctrl; - int rhdr_cpu[18]; - int sdma_cpu[2]; - u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */ -#endif struct msix_entry *msix_entries; void **msix_arg; unsigned long *sendchkenable; @@ -574,11 +543,12 @@ struct vendor_txdds_ent { static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ +#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */ #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ #define H1_FORCE_VAL 8 -#define H1_FORCE_QME 1 /* may be overridden via setup_qme_params() */ -#define H1_FORCE_QMH 7 /* may be overridden via setup_qmh_params() */ +#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */ +#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */ /* The static and dynamic registers are paired, and the pairs indexed by spd */ #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \ @@ -590,15 +560,6 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */ #define 
QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */ -static const struct txdds_ent qmh_sdr_txdds = { 11, 0, 5, 6 }; -static const struct txdds_ent qmh_ddr_txdds = { 7, 0, 2, 8 }; -static const struct txdds_ent qmh_qdr_txdds = { 0, 1, 3, 10 }; - -/* this is used for unknown mez cards also */ -static const struct txdds_ent qme_sdr_txdds = { 11, 0, 4, 4 }; -static const struct txdds_ent qme_ddr_txdds = { 7, 0, 2, 7 }; -static const struct txdds_ent qme_qdr_txdds = { 0, 1, 12, 11 }; - struct qib_chippport_specific { u64 __iomem *kpregbase; u64 __iomem *cpregbase; @@ -637,12 +598,8 @@ struct qib_chippport_specific { * Per-bay per-channel rcv QMH H1 values and Tx values for QDR. * entry zero is unused, to simplify indexing */ - u16 h1_val; - u8 amp[SERDES_CHANS]; - u8 pre[SERDES_CHANS]; - u8 mainv[SERDES_CHANS]; - u8 post[SERDES_CHANS]; - u8 no_eep; /* attenuation index to use if no qsfp info */ + u8 h1_val; + u8 no_eep; /* txselect table index to use if no qsfp info */ u8 ipg_tries; u8 ibmalfusesnap; struct qib_qsfp_data qsfp_data; @@ -676,52 +633,6 @@ static struct { SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 }, }; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -static const struct dca_reg_map { - int shadow_inx; - int lsb; - u64 mask; - u16 regno; -} dca_rcvhdr_reg_map[] = { - { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH), - ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) }, - { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH), - ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) }, - { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH), - ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) }, - { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH), - ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) }, - { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH), - ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) }, - { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH), - ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) }, - { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH), - ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) }, - { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH), - ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) }, - { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH), - ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) }, - { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH), - ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) }, - { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH), - ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) }, - { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH), - ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) }, - { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH), - ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) }, - { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH), - ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) }, - { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH), - ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) }, - { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH), - ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) }, - { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH), - ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) }, - { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH), - ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) }, -}; -#endif - /* ibcctrl bits */ #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1 /* cycle through TS1/TS2 till OK */ @@ -2572,95 +2483,6 @@ static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on) qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink); } -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) -static void 
qib_update_rhdrq_dca(struct qib_ctxtdata *rcd) -{ - struct qib_devdata *dd = rcd->dd; - struct qib_chip_specific *cspec = dd->cspec; - int cpu = get_cpu(); - - if (cspec->rhdr_cpu[rcd->ctxt] != cpu) { - const struct dca_reg_map *rmp; - - cspec->rhdr_cpu[rcd->ctxt] = cpu; - rmp = &dca_rcvhdr_reg_map[rcd->ctxt]; - cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask; - cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |= - (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb; - qib_write_kreg(dd, rmp->regno, - cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]); - cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable); - qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); - } - put_cpu(); -} - -static void qib_update_sdma_dca(struct qib_pportdata *ppd) -{ - struct qib_devdata *dd = ppd->dd; - struct qib_chip_specific *cspec = dd->cspec; - int cpu = get_cpu(); - unsigned pidx = ppd->port - 1; - - if (cspec->sdma_cpu[pidx] != cpu) { - cspec->sdma_cpu[pidx] = cpu; - cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ? - SYM_MASK(DCACtrlF, SendDma1DCAOPH) : - SYM_MASK(DCACtrlF, SendDma0DCAOPH)); - cspec->dca_rcvhdr_ctrl[4] |= - (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << - (ppd->hw_pidx ? - SYM_LSB(DCACtrlF, SendDma1DCAOPH) : - SYM_LSB(DCACtrlF, SendDma0DCAOPH)); - qib_write_kreg(dd, KREG_IDX(DCACtrlF), - cspec->dca_rcvhdr_ctrl[4]); - cspec->dca_ctrl |= ppd->hw_pidx ? - SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) : - SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable); - qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl); - } - put_cpu(); -} - -static void qib_setup_dca(struct qib_devdata *dd) -{ - struct qib_chip_specific *cspec = dd->cspec; - int i; - - for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++) - cspec->rhdr_cpu[i] = -1; - for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) - cspec->sdma_cpu[i] = -1; - cspec->dca_rcvhdr_ctrl[0] = - (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt)); - cspec->dca_rcvhdr_ctrl[1] = - (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt)); - cspec->dca_rcvhdr_ctrl[2] = - (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt)); - cspec->dca_rcvhdr_ctrl[3] = - (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt)); - cspec->dca_rcvhdr_ctrl[4] = - (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) | - (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt)); - for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++) - qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i, - cspec->dca_rcvhdr_ctrl[i]); -} - -#endif - /* * Disable MSIx interrupt if enabled, call generic MSIx code * to cleanup, and clear pending MSIx interrupts. 
@@ -2701,15 +2523,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) { int i; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) { - dca_remove_requester(&dd->pcidev->dev); - dd->flags &= ~QIB_DCA_ENABLED; - dd->cspec->dca_ctrl = 0; - qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl); - } -#endif - qib_7322_free_irq(dd); kfree(dd->cspec->cntrs); kfree(dd->cspec->sendchkenable); @@ -3017,11 +2830,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data) if (dd->int_counter != (u32) -1) dd->int_counter++; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_rhdrq_dca(rcd); -#endif - /* Clear the interrupt bit we expect to be set. */ qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); @@ -3085,11 +2893,6 @@ static irqreturn_t sdma_intr(int irq, void *data) if (dd->int_counter != (u32) -1) dd->int_counter++; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_sdma_dca(ppd); -#endif - /* Clear the interrupt bit we expect to be set. */ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0)); @@ -3119,11 +2922,6 @@ static irqreturn_t sdma_idle_intr(int irq, void *data) if (dd->int_counter != (u32) -1) dd->int_counter++; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_sdma_dca(ppd); -#endif - /* Clear the interrupt bit we expect to be set. */ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0)); @@ -3153,11 +2951,6 @@ static irqreturn_t sdma_progress_intr(int irq, void *data) if (dd->int_counter != (u32) -1) dd->int_counter++; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_sdma_dca(ppd); -#endif - /* Clear the interrupt bit we expect to be set. */ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? INT_MASK_P(SDmaProgress, 1) : @@ -3188,11 +2981,6 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data) if (dd->int_counter != (u32) -1) dd->int_counter++; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_sdma_dca(ppd); -#endif - /* Clear the interrupt bit we expect to be set. */ qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? 
INT_MASK_PM(SDmaCleanupDone, 1) : @@ -4299,10 +4087,6 @@ static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op, qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, rcd->rcvhdrq_phys); rcd->seq_cnt = 1; -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - if (dd->flags & QIB_DCA_ENABLED) - qib_update_rhdrq_dca(rcd); -#endif } if (op & QIB_RCVCTRL_CTXT_DIS) ppd->p_rcvctrl &= @@ -5360,7 +5144,13 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) QIBL_IB_AUTONEG_INPROG))) set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled); if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) { + /* unlock the Tx settings, speed may change */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); qib_cancel_sends(ppd); + /* on link down, ensure sane pcs state */ + qib_7322_mini_pcs_reset(ppd); spin_lock_irqsave(&ppd->sdma_lock, flags); if (__qib_sdma_running(ppd)) __qib_sdma_process_event(ppd, @@ -5766,26 +5556,28 @@ static void qib_init_7322_qsfp(struct qib_pportdata *ppd) } /* - * called at device initialization time, and also if the cable_atten + * called at device initialization time, and also if the txselect * module parameter is changed. This is used for cables that don't * have valid QSFP EEPROMs (not present, or attenuation is zero). * We initialize to the default, then if there is a specific - * unit,port match, we use that. + * unit,port match, we use that (and set it immediately, for the + * current speed, if the link is at INIT or better). * String format is "default# unit#,port#=# ... u,p=#", separators must - * be a SPACE character. A newline terminates. + * be a SPACE character. A newline terminates. The u,p=# tuples may + * optionally have "u,p=#,#", where the final # is the H1 value * The last specific match is used (actually, all are used, but last * one is the one that winds up set); if none at all, fall back on default. */ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) { char *nxt, *str; - int pidx, unit, port, deflt; + u32 pidx, unit, port, deflt, h1; unsigned long val; - int any = 0; + int any = 0, seth1; - str = cable_atten_list; + str = txselect_list; - /* default number is validated in setup_cable_atten() */ + /* default number is validated in setup_txselect() */ deflt = simple_strtoul(str, &nxt, 0); for (pidx = 0; pidx < dd->num_pports; ++pidx) dd->pport[pidx].cpspec->no_eep = deflt; @@ -5812,16 +5604,28 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) ; continue; } - if (val >= TXDDS_TABLE_SZ) + if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) continue; + seth1 = 0; + h1 = 0; /* gcc thinks it might be used uninitted */ + if (*nxt == ',' && nxt[1]) { + str = ++nxt; + h1 = (u32)simple_strtoul(str, &nxt, 0); + if (nxt == str) + while (*nxt && *nxt++ != ' ') /* skip */ + ; + else + seth1 = 1; + } for (pidx = 0; dd->unit == unit && pidx < dd->num_pports; ++pidx) { - if (dd->pport[pidx].port != port || - !dd->pport[pidx].link_speed_supported) + struct qib_pportdata *ppd = &dd->pport[pidx]; + + if (ppd->port != port || !ppd->link_speed_supported) continue; - dd->pport[pidx].cpspec->no_eep = val; + ppd->cpspec->no_eep = val; /* now change the IBC and serdes, overriding generic */ - init_txdds_table(&dd->pport[pidx], 1); + init_txdds_table(ppd, 1); any++; } if (*nxt == '\n') @@ -5832,35 +5636,35 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) * Change the IBC and serdes, but since it's * general, don't override specific settings. 
*/ - for (pidx = 0; pidx < dd->num_pports; ++pidx) { - if (!dd->pport[pidx].link_speed_supported) - continue; - init_txdds_table(&dd->pport[pidx], 0); - } + for (pidx = 0; pidx < dd->num_pports; ++pidx) + if (dd->pport[pidx].link_speed_supported) + init_txdds_table(&dd->pport[pidx], 0); } } -/* handle the cable_atten parameter changing */ -static int setup_cable_atten(const char *str, struct kernel_param *kp) +/* handle the txselect parameter changing */ +static int setup_txselect(const char *str, struct kernel_param *kp) { struct qib_devdata *dd; unsigned long val; char *n; if (strlen(str) >= MAX_ATTEN_LEN) { - printk(KERN_INFO QIB_DRV_NAME " cable_atten_values string " + printk(KERN_INFO QIB_DRV_NAME " txselect_values string " "too long\n"); return -ENOSPC; } val = simple_strtoul(str, &n, 0); - if (n == str || val >= TXDDS_TABLE_SZ) { + if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { printk(KERN_INFO QIB_DRV_NAME - "cable_atten_values must start with a number\n"); + "txselect_values must start with a number < %d\n", + TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); return -EINVAL; } - strcpy(cable_atten_list, str); + strcpy(txselect_list, str); list_for_each_entry(dd, &qib_dev_list, list) - set_no_qsfp_atten(dd, 1); + if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) + set_no_qsfp_atten(dd, 1); return 0; } @@ -6261,28 +6065,17 @@ static int qib_init_7322_variables(struct qib_devdata *dd) * in adapter-specific routines. */ if (!(ppd->dd->flags & QIB_HAS_QSFP)) { - int i; - const struct txdds_ent *txdds; - if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd)) qib_devinfo(ppd->dd->pcidev, "IB%u:%u: " "Unknown mezzanine card type\n", - ppd->dd->unit, ppd->port); - txdds = IS_QMH(ppd->dd) ? &qmh_qdr_txdds : - &qme_qdr_txdds; - + dd->unit, ppd->port); + cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME; /* - * set values in case link comes up - * before table is written to driver. + * Choose center value as default tx serdes setting + * until changed through module parameter. */ - cp->h1_val = IS_QMH(ppd->dd) ? H1_FORCE_QMH : - H1_FORCE_QME; - for (i = 0; i < SERDES_CHANS; i++) { - cp->amp[i] = txdds->amp; - cp->pre[i] = txdds->pre; - cp->mainv[i] = txdds->main; - cp->post[i] = txdds->post; - } + ppd->cpspec->no_eep = IS_QMH(dd) ? 
+ TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4; } else cp->h1_val = H1_FORCE_VAL; @@ -6299,8 +6092,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; - dd->rhf_offset = - dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); + dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); /* we always allocate at least 2048 bytes for eager buffers */ dd->rcvegrbufsize = max(mtu, 2048); @@ -6919,13 +6711,6 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev, /* clear diagctrl register, in case diags were running and crashed */ qib_write_kreg(dd, kr_hwdiagctrl, 0); -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - ret = dca_add_requester(&pdev->dev); - if (!ret) { - dd->flags |= QIB_DCA_ENABLED; - qib_setup_dca(dd); - } -#endif goto bail; bail_cleanup: @@ -7111,8 +6896,8 @@ static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = { static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { /* amp, pre, main, post */ { 2, 2, 15, 6 }, /* Loopback */ - { 0, 1, 0, 7 }, /* 2 dB */ - { 0, 1, 0, 9 }, /* 3 dB */ + { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */ + { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */ { 0, 1, 0, 11 }, /* 4 dB */ { 0, 1, 0, 13 }, /* 5 dB */ { 0, 1, 0, 15 }, /* 6 dB */ @@ -7128,6 +6913,57 @@ static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = { { 0, 2, 9, 15 }, /* 16 dB */ }; +/* + * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ. + * These are mostly used for mez cards going through connectors + * and backplane traces, but can be used to add other "unusual" + * table values as well. + */ +static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 1 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 2 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ + { 0, 0, 0, 11 }, /* QME7342 backplane settings */ +}; + +static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 7 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ + { 0, 0, 0, 13 }, /* QME7342 backplane settings */ +}; + +static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { + /* amp, pre, main, post */ + { 0, 1, 0, 4 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 5 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 6 }, /* QMH7342 backplane settings */ + { 0, 1, 0, 8 }, /* QMH7342 backplane settings */ + { 0, 1, 12, 10 }, /* QME7342 backplane setting */ + { 0, 1, 12, 11 }, /* QME7342 backplane setting */ + { 0, 1, 12, 12 }, /* QME7342 backplane setting */ + { 0, 1, 12, 14 }, /* QME7342 backplane setting */ + { 0, 1, 12, 6 }, /* QME7342 backplane setting */ + { 0, 1, 
12, 7 }, /* QME7342 backplane setting */ + { 0, 1, 12, 8 }, /* QME7342 backplane setting */ +}; + static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, unsigned atten) { @@ -7145,7 +6981,7 @@ static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, } /* - * if override is set, the module parameter cable_atten has a value + * if override is set, the module parameter txselect has a value * for this specific port, so use it, rather than our normal mechanism. */ static void find_best_ent(struct qib_pportdata *ppd, @@ -7184,15 +7020,28 @@ static void find_best_ent(struct qib_pportdata *ppd, *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]); *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]); return; - } else { + } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) { /* * If we have no (or incomplete) data from the cable - * EEPROM, or no QSFP, use the module parameter value - * to index into the attentuation table. + * EEPROM, or no QSFP, or override is set, use the + * module parameter value to index into the attentuation + * table. */ - *sdr_dds = &txdds_sdr[ppd->cpspec->no_eep]; - *ddr_dds = &txdds_ddr[ppd->cpspec->no_eep]; - *qdr_dds = &txdds_qdr[ppd->cpspec->no_eep]; + idx = ppd->cpspec->no_eep; + *sdr_dds = &txdds_sdr[idx]; + *ddr_dds = &txdds_ddr[idx]; + *qdr_dds = &txdds_qdr[idx]; + } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { + /* similar to above, but index into the "extra" table. */ + idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ; + *sdr_dds = &txdds_extra_sdr[idx]; + *ddr_dds = &txdds_extra_ddr[idx]; + *qdr_dds = &txdds_extra_qdr[idx]; + } else { + /* this shouldn't happen, it's range checked */ + *sdr_dds = txdds_sdr + qib_long_atten; + *ddr_dds = txdds_ddr + qib_long_atten; + *qdr_dds = txdds_qdr + qib_long_atten; } } @@ -7203,33 +7052,24 @@ static void init_txdds_table(struct qib_pportdata *ppd, int override) int idx; int single_ent = 0; - if (IS_QMH(ppd->dd)) { - /* normally will be overridden, via setup_qmh() */ - sdr_dds = &qmh_sdr_txdds; - ddr_dds = &qmh_ddr_txdds; - qdr_dds = &qmh_qdr_txdds; - single_ent = 1; - } else if (IS_QME(ppd->dd)) { - sdr_dds = &qme_sdr_txdds; - ddr_dds = &qme_ddr_txdds; - qdr_dds = &qme_qdr_txdds; + find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); + + /* for mez cards or override, use the selected value for all entries */ + if (!(ppd->dd->flags & QIB_HAS_QSFP) || override) single_ent = 1; - } else - find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override); /* Fill in the first entry with the best entry found. */ set_txdds(ppd, 0, sdr_dds); set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds); set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds); - - /* - * for our current speed, also write that value into the - * tx serdes registers. - */ - dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? - qdr_dds : (ppd->link_speed_active == - QIB_IB_DDR ? ddr_dds : sdr_dds)); - write_tx_serdes_param(ppd, dds); + if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | + QIBL_LINKACTIVE)) { + dds = (struct txdds_ent *)(ppd->link_speed_active == + QIB_IB_QDR ? qdr_dds : + (ppd->link_speed_active == + QIB_IB_DDR ? ddr_dds : sdr_dds)); + write_tx_serdes_param(ppd, dds); + } /* Fill in the remaining entries with the default table values. 
*/ for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) { @@ -7352,6 +7192,11 @@ static int serdes_7322_init(struct qib_pportdata *ppd) */ init_txdds_table(ppd, 0); + /* ensure no tx overrides from earlier driver loads */ + qib_write_kreg_port(ppd, krp_tx_deemph_override, + SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + reset_tx_deemphasis_override)); + /* Patch some SerDes defaults to "Better for IB" */ /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */ ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); @@ -7421,7 +7266,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd) QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); ppd->cpspec->qdr_dfe_on = 1; - /* (FLoop LOS gate: PPM filter enabled */ + /* FLoop LOS gate: PPM filter enabled */ ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); /* rx offset center enabled */ @@ -7486,68 +7331,39 @@ static void write_tx_serdes_param(struct qib_pportdata *ppd, SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena)); - deemph |= 1ULL << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - tx_override_deemphasis_select); - deemph |= txdds->amp << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txampcntl_d2a); - deemph |= txdds->main << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txc0_ena); - deemph |= txdds->post << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txcp1_ena); - deemph |= txdds->pre << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + + deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + tx_override_deemphasis_select); + deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txampcntl_d2a); + deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txc0_ena); + deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcp1_ena); + deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, + txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena); qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph); } /* - * set per-bay, per channel parameters. For now, we ignore - * do_tx, and always set tx parameters, and set them with the same value - * for all channels, using the channel 0 value. We may switch to - * per-channel settings in the future, and that method only needs - * to be done once. - * Because this also writes the IBC txdds table with a single set - * of values, it should be called only for cases where we want to completely - * force a specific setting, typically only for mez cards. + * Set the parameters for mez cards on link bounce, so they are + * always exactly what was requested. Similar logic to init_txdds + * but does just the serdes. 
*/ static void adj_tx_serdes(struct qib_pportdata *ppd) { - struct txdds_ent txdds; - int i; - u8 *amp, *pre, *mainv, *post; - - /* - * Because we use TX_DEEMPHASIS_OVERRIDE, we need to - * always do tx side, just like H1, since it is cleared - * by link down - */ - amp = ppd->cpspec->amp; - pre = ppd->cpspec->pre; - mainv = ppd->cpspec->mainv; - post = ppd->cpspec->post; - - amp[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txampcntl_d2a); - mainv[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txc0_ena); - post[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txcp1_ena); - pre[0] &= SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, - txcn1_ena); - - /* - * Use the channel zero values, only, for now, for - * all channels - */ - txdds.amp = amp[0]; - txdds.pre = pre[0]; - txdds.main = mainv[0]; - txdds.post = post[0]; - - /* write the QDR table for IBC use, as backup for link down */ - for (i = 0; i < ARRAY_SIZE(txdds_qdr); ++i) - set_txdds(ppd, i + 32, &txdds); + const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds; + struct txdds_ent *dds; - write_tx_serdes_param(ppd, &txdds); + find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1); + dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ? + qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ? + ddr_dds : sdr_dds)); + write_tx_serdes_param(ppd, dds); } /* set QDR forced value for H1, if needed */ @@ -7567,235 +7383,6 @@ static void force_h1(struct qib_pportdata *ppd) } } -/* - * Parse the parameters for the QMH7342, to get rx and tx serdes - * settings for that Bay, for both possible mez connectors (PCIe bus) - * and IB link (one link on mez1, two possible on mez2). - * - * Data is comma or white space separated. - * - * A set of data has 7 groups, rx and tx groups have SERDES_CHANS values, - * one per IB lane (serdes channel). - * The groups are Bay, bus# H1 rcv, and amp, pre, post, main Tx values (QDR). - * The Bay # is used only for debugging currently. - * H1 values are set whenever the link goes down, or is at cfg_test or - * cfg_wait_enh. Tx values are programmed once, when this routine is called - * (and with default values at chip initialization). Values are any base, in - * strtoul style, and values are seperated by comma, or any white space - * (space, tab, newline). - * - * An example set might look like this (white space vs - * comma used for human ease of reading) - * The ordering is a set of Bay# Bus# H1, amp, pre, post, and main for mez1 IB1, - * repeat for mez2 IB1, then mez2 IB2. 
- * - * B B H1:0 amp:0 pre:0 post: 0 main:0 - * a u H1: 1 amp: 1 pre: 1 post: 1 main: 1 - * y s H1: 2 amp: 2 pre: 2 post: 2 main: 2 - * H1: 4 amp: 3 pre: 3 post: 3 main: 3 - * 1 3 8,6,5,6 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 - * 1 6 7,6,6,7 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 - * 1 6 9,7,7,8 0,0,0,0 1,1,1,1 10,10,10,10 3,3,3,3 - */ -#define N_QMH_FIELDS 22 -static int setup_qmh_params(const char *str, struct kernel_param *kp) -{ - char *abuf, *v, *nv, *nvp; - struct qib_devdata *dd; - struct qib_pportdata *ppd; - u32 mez, vlen, nf, port, bay; - int ret = 0, found = 0; - - vlen = strlen(str) + 1; - abuf = kmalloc(vlen, GFP_KERNEL); - if (!abuf) { - printk(KERN_INFO QIB_DRV_NAME - " Unable to allocate QMH param buffer; ignoring\n"); - return 0; - } - memcpy(abuf, str, vlen); - v = abuf; - - /* these 3 are because gcc can't know they are set before used */ - port = 1; - mez = 1; /* used only for debugging */ - bay = 0; /* used only for debugging */ - ppd = NULL; - for (nf = 0; (nv = strsep(&v, ", \t\n\r")) && - nf < (N_QMH_FIELDS * 3);) { - u32 val; - - if (!*nv) - /* allow for multiple separators */ - continue; - - val = simple_strtoul(nv, &nvp, 0); - if (nv == nvp) { - printk(KERN_INFO QIB_DRV_NAME - " Bay%u, mez%u IB%u non-numeric value (%s) " - "field #%u, ignoring rest\n", bay, mez, - port, nv, nf % (N_QMH_FIELDS * 3)); - ret = -EINVAL; - goto bail; - } - if (!(nf % N_QMH_FIELDS)) { - ppd = NULL; - bay = val; - if (!bay || bay > 16) { - printk(KERN_INFO QIB_DRV_NAME - " Invalid bay # %u, field %u, " - "ignoring rest\n", bay, nf); - ret = -EINVAL; - goto bail; - } - } else if ((nf % N_QMH_FIELDS) == 1) { - u32 bus = val; - if (nf == 1) { - mez = 1; - port = 1; - } else if (nf == (N_QMH_FIELDS + 1)) { - mez = 2; - port = 1; - } else { - mez = 2; - port = 2; - } - list_for_each_entry(dd, &qib_dev_list, list) { - if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 - || !IS_QMH(dd)) - continue; /* only for QMH cards */ - if (dd->pcidev->bus->number == bus) { - found++; - ppd = &dd->pport[port - 1]; - } - } - } else if (ppd) { - u32 parm = (nf % N_QMH_FIELDS) - 2; - if (parm < SERDES_CHANS && !(parm % SERDES_CHANS)) - ppd->cpspec->h1_val = val; - else if (parm < (2 * SERDES_CHANS)) - ppd->cpspec->amp[parm % SERDES_CHANS] = val; - else if (parm < (3 * SERDES_CHANS)) - ppd->cpspec->pre[parm % SERDES_CHANS] = val; - else if (parm < (4 * SERDES_CHANS)) - ppd->cpspec->post[parm % SERDES_CHANS] = val; - else { - ppd->cpspec->mainv[parm % SERDES_CHANS] = val; - /* At the end of a port, set params */ - if (parm == ((5 * SERDES_CHANS) - 1)) - adj_tx_serdes(ppd); - } - } - nf++; - } - if (!found) { - printk(KERN_ERR QIB_DRV_NAME - ": No match found for qmh_serdes_setup parameter\n"); - ret = -EINVAL; - } -bail: - kfree(abuf); - return ret; -} - -/* - * Similarly for QME7342, but the format is simpler, values are the - * same for all mez card positions in a blade (2 or 4 per blade), but - * are different for some blades vs others, and we don't need to - * specify different parameters for different serdes channels or different - * IB ports. 
- * Format is: h1 amp,pre,post,main - * Alternate format (so ports can be different): Pport# h1 amp,pre,post,main - */ -#define N_QME_FIELDS 5 -static int setup_qme_params(const char *str, struct kernel_param *kp) -{ - char *abuf, *v, *nv, *nvp; - struct qib_devdata *dd; - u32 vlen, nf, port = 0; - u8 h1, tx[4]; /* amp, pre, post, main */ - int ret = -EINVAL; - char *seplist; - - vlen = strlen(str) + 1; - abuf = kmalloc(vlen, GFP_KERNEL); - if (!abuf) { - printk(KERN_INFO QIB_DRV_NAME - " Unable to allocate QME param buffer; ignoring\n"); - return 0; - } - strncpy(abuf, str, vlen); - - v = abuf; - seplist = " \t"; - h1 = H1_FORCE_QME; /* gcc can't figure out always set before used */ - - for (nf = 0; (nv = strsep(&v, seplist)); ) { - u32 val; - - if (!*nv) - /* allow for multiple separators */ - continue; - - if (!nf && *nv == 'P') { - /* alternate format with port */ - val = simple_strtoul(++nv, &nvp, 0); - if (nv == nvp || port >= NUM_IB_PORTS) { - printk(KERN_INFO QIB_DRV_NAME - " %s: non-numeric port value (%s) " - "ignoring rest\n", __func__, nv); - goto done; - } - port = val; - continue; /* without incrementing nf */ - } - val = simple_strtoul(nv, &nvp, 0); - if (nv == nvp) { - printk(KERN_INFO QIB_DRV_NAME - " %s: non-numeric value (%s) " - "field #%u, ignoring rest\n", __func__, - nv, nf); - goto done; - } - if (!nf) { - h1 = val; - seplist = ","; - } else - tx[nf - 1] = val; - if (++nf == N_QME_FIELDS) { - list_for_each_entry(dd, &qib_dev_list, list) { - int pidx, i; - if (dd->deviceid != PCI_DEVICE_ID_QLOGIC_IB_7322 - || !IS_QME(dd)) - continue; /* only for QME cards */ - for (pidx = 0; pidx < dd->num_pports; ++pidx) { - struct qib_pportdata *ppd; - ppd = &dd->pport[pidx]; - if ((port && ppd->port != port) || - !ppd->link_speed_supported) - continue; - ppd->cpspec->h1_val = h1; - for (i = 0; i < SERDES_CHANS; i++) { - ppd->cpspec->amp[i] = tx[0]; - ppd->cpspec->pre[i] = tx[1]; - ppd->cpspec->post[i] = tx[2]; - ppd->cpspec->mainv[i] = tx[3]; - } - adj_tx_serdes(ppd); - } - } - ret = 0; - goto done; - } - } - printk(KERN_INFO QIB_DRV_NAME - " %s: Only %u of %u fields provided, skipping\n", - __func__, nf, N_QME_FIELDS); -done: - kfree(abuf); - return ret; -} - #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN) #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en) diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index c0139c07e97e..9b40f345ac3f 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1237,7 +1237,13 @@ static int __devinit qib_init_one(struct pci_dev *pdev, */ switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_IB_6120: +#ifdef CONFIG_PCI_MSI dd = qib_init_iba6120_funcs(pdev, ent); +#else + qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " + "work if CONFIG_PCI_MSI is not enabled\n", + ent->device); +#endif break; case PCI_DEVICE_ID_QLOGIC_IB_7220: diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 423e0e6031ab..34157bb97ed6 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -47,15 +47,15 @@ struct joydev { struct mutex mutex; struct device dev; - struct js_corr corr[ABS_MAX + 1]; + struct js_corr corr[ABS_CNT]; struct JS_DATA_SAVE_TYPE glue; int nabs; int nkey; __u16 keymap[KEY_MAX - BTN_MISC + 1]; __u16 keypam[KEY_MAX - BTN_MISC + 1]; - __u8 absmap[ABS_MAX + 1]; - __u8 abspam[ABS_MAX + 1]; - __s16 abs[ABS_MAX + 1]; + __u8 absmap[ABS_CNT]; + __u8 abspam[ABS_CNT]; + __s16 abs[ABS_CNT]; }; struct joydev_client { @@ -826,7 +826,7 @@ 
static int joydev_connect(struct input_handler *handler, struct input_dev *dev, joydev->handle.handler = handler; joydev->handle.private = joydev; - for (i = 0; i < ABS_MAX + 1; i++) + for (i = 0; i < ABS_CNT; i++) if (test_bit(i, dev->absbit)) { joydev->absmap[i] = joydev->nabs; joydev->abspam[joydev->nabs] = i; diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c index 35149ec455a9..79172af164f2 100644 --- a/drivers/input/keyboard/amikbd.c +++ b/drivers/input/keyboard/amikbd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -154,10 +155,9 @@ static const char *amikbd_messages[8] = { [7] = KERN_WARNING "amikbd: keyboard interrupt\n" }; -static struct input_dev *amikbd_dev; - -static irqreturn_t amikbd_interrupt(int irq, void *dummy) +static irqreturn_t amikbd_interrupt(int irq, void *data) { + struct input_dev *dev = data; unsigned char scancode, down; scancode = ~ciaa.sdr; /* get and invert scancode (keyboard is active low) */ @@ -170,47 +170,42 @@ static irqreturn_t amikbd_interrupt(int irq, void *dummy) if (scancode < 0x78) { /* scancodes < 0x78 are keys */ if (scancode == 98) { /* CapsLock is a toggle switch key on Amiga */ - input_report_key(amikbd_dev, scancode, 1); - input_report_key(amikbd_dev, scancode, 0); + input_report_key(dev, scancode, 1); + input_report_key(dev, scancode, 0); } else { - input_report_key(amikbd_dev, scancode, down); + input_report_key(dev, scancode, down); } - input_sync(amikbd_dev); + input_sync(dev); } else /* scancodes >= 0x78 are error codes */ printk(amikbd_messages[scancode - 0x78]); return IRQ_HANDLED; } -static int __init amikbd_init(void) +static int __init amikbd_probe(struct platform_device *pdev) { + struct input_dev *dev; int i, j, err; - if (!AMIGAHW_PRESENT(AMI_KEYBOARD)) - return -ENODEV; - - if (!request_mem_region(CIAA_PHYSADDR-1+0xb00, 0x100, "amikeyb")) - return -EBUSY; - - amikbd_dev = input_allocate_device(); - if (!amikbd_dev) { - printk(KERN_ERR "amikbd: not enough memory for input device\n"); - err = -ENOMEM; - goto fail1; + dev = input_allocate_device(); + if (!dev) { + dev_err(&pdev->dev, "Not enough memory for input device\n"); + return -ENOMEM; } - amikbd_dev->name = "Amiga Keyboard"; - amikbd_dev->phys = "amikbd/input0"; - amikbd_dev->id.bustype = BUS_AMIGA; - amikbd_dev->id.vendor = 0x0001; - amikbd_dev->id.product = 0x0001; - amikbd_dev->id.version = 0x0100; + dev->name = pdev->name; + dev->phys = "amikbd/input0"; + dev->id.bustype = BUS_AMIGA; + dev->id.vendor = 0x0001; + dev->id.product = 0x0001; + dev->id.version = 0x0100; + dev->dev.parent = &pdev->dev; - amikbd_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); + dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); for (i = 0; i < 0x78; i++) - set_bit(i, amikbd_dev->keybit); + set_bit(i, dev->keybit); for (i = 0; i < MAX_NR_KEYMAPS; i++) { static u_short temp_map[NR_KEYS] __initdata; @@ -229,30 +224,54 @@ static int __init amikbd_init(void) memcpy(key_maps[i], temp_map, sizeof(temp_map)); } ciaa.cra &= ~0x41; /* serial data in, turn off TA */ - if (request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", - amikbd_interrupt)) { - err = -EBUSY; + err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd", + dev); + if (err) goto fail2; - } - err = input_register_device(amikbd_dev); + err = input_register_device(dev); if (err) goto fail3; + platform_set_drvdata(pdev, dev); + return 0; - fail3: free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); - fail2: input_free_device(amikbd_dev); - fail1: 
release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100); + fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev); + fail2: input_free_device(dev); return err; } -static void __exit amikbd_exit(void) +static int __exit amikbd_remove(struct platform_device *pdev) +{ + struct input_dev *dev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + free_irq(IRQ_AMIGA_CIAA_SP, dev); + input_unregister_device(dev); + return 0; +} + +static struct platform_driver amikbd_driver = { + .remove = __exit_p(amikbd_remove), + .driver = { + .name = "amiga-keyboard", + .owner = THIS_MODULE, + }, +}; + +static int __init amikbd_init(void) { - free_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt); - input_unregister_device(amikbd_dev); - release_mem_region(CIAA_PHYSADDR - 1 + 0xb00, 0x100); + return platform_driver_probe(&amikbd_driver, amikbd_probe); } module_init(amikbd_init); + +static void __exit amikbd_exit(void) +{ + platform_driver_unregister(&amikbd_driver); +} + module_exit(amikbd_exit); + +MODULE_ALIAS("platform:amiga-keyboard"); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 48cdabec372a..c44b9eafc556 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -80,6 +80,16 @@ config INPUT_M68K_BEEP tristate "M68k Beeper support" depends on M68K +config INPUT_MAX8925_ONKEY + tristate "MAX8925 ONKEY support" + depends on MFD_MAX8925 + help + Support the ONKEY of MAX8925 PMICs as an input device + reporting power button status. + + To compile this driver as a module, choose M here: the module + will be called max8925_onkey. + config INPUT_APANEL tristate "Fujitsu Lifebook Application Panel buttons" depends on X86 && I2C && LEDS_CLASS diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index f9f577031e06..71fe57d8023f 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o +obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c new file mode 100644 index 000000000000..80af44608018 --- /dev/null +++ b/drivers/input/misc/max8925_onkey.c @@ -0,0 +1,148 @@ +/** + * max8925_onkey.c - MAX8925 ONKEY driver + * + * Copyright (C) 2009 Marvell International Ltd. + * Haojian Zhuang + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define HARDRESET_EN (1 << 7) +#define PWREN_EN (1 << 7) + +struct max8925_onkey_info { + struct input_dev *idev; + struct i2c_client *i2c; + int irq; +}; + +/* + * MAX8925 gives us an interrupt when ONKEY is held for 3 seconds. + * max8925_set_bits() operates I2C bus and may sleep. So implement + * it in thread IRQ handler. + */ +static irqreturn_t max8925_onkey_handler(int irq, void *data) +{ + struct max8925_onkey_info *info = data; + + input_report_key(info->idev, KEY_POWER, 1); + input_sync(info->idev); + + /* Enable hardreset to halt if system isn't shutdown on time */ + max8925_set_bits(info->i2c, MAX8925_SYSENSEL, + HARDRESET_EN, HARDRESET_EN); + + return IRQ_HANDLED; +} + +static int __devinit max8925_onkey_probe(struct platform_device *pdev) +{ + struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); + struct max8925_onkey_info *info; + int error; + + info = kzalloc(sizeof(struct max8925_onkey_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->i2c = chip->i2c; + info->irq = chip->irq_base + MAX8925_IRQ_GPM_SW_3SEC; + + info->idev = input_allocate_device(); + if (!info->idev) { + dev_err(chip->dev, "Failed to allocate input dev\n"); + error = -ENOMEM; + goto out_input; + } + + info->idev->name = "max8925_on"; + info->idev->phys = "max8925_on/input0"; + info->idev->id.bustype = BUS_I2C; + info->idev->dev.parent = &pdev->dev; + info->idev->evbit[0] = BIT_MASK(EV_KEY); + info->idev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); + + error = request_threaded_irq(info->irq, NULL, max8925_onkey_handler, + IRQF_ONESHOT, "onkey", info); + if (error < 0) { + dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", + info->irq, error); + goto out_irq; + } + + error = input_register_device(info->idev); + if (error) { + dev_err(chip->dev, "Can't register input device: %d\n", error); + goto out; + } + + platform_set_drvdata(pdev, info); + + return 0; + +out: + free_irq(info->irq, info); +out_irq: + input_free_device(info->idev); +out_input: + kfree(info); + return error; +} + +static int __devexit max8925_onkey_remove(struct platform_device *pdev) +{ + struct max8925_onkey_info *info = platform_get_drvdata(pdev); + + free_irq(info->irq, info); + input_unregister_device(info->idev); + kfree(info); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver max8925_onkey_driver = { + .driver = { + .name = "max8925-onkey", + .owner = THIS_MODULE, + }, + .probe = max8925_onkey_probe, + .remove = __devexit_p(max8925_onkey_remove), +}; + +static int __init max8925_onkey_init(void) +{ + return platform_driver_register(&max8925_onkey_driver); +} +module_init(max8925_onkey_init); + +static void __exit max8925_onkey_exit(void) +{ + platform_driver_unregister(&max8925_onkey_driver); +} +module_exit(max8925_onkey_exit); + +MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver"); +MODULE_AUTHOR("Haojian Zhuang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index fee9eac8e04a..4f9b2afc24e8 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -90,8 +90,8 @@ static void vibra_disable(struct vibra_info *info) twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, (reg & 
~TWL4030_VIBRA_EN), TWL4030_REG_VIBRA_CTL); - twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER); twl4030_codec_disable_resource(TWL4030_CODEC_RES_APLL); + twl4030_codec_disable_resource(TWL4030_CODEC_RES_POWER); info->enabled = false; } diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 1477466076ad..b71eb55f2dbc 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -300,7 +300,7 @@ static int uinput_validate_absbits(struct input_dev *dev) unsigned int cnt; int retval = 0; - for (cnt = 0; cnt < ABS_MAX + 1; cnt++) { + for (cnt = 0; cnt < ABS_CNT; cnt++) { if (!test_bit(cnt, dev->absbit)) continue; @@ -387,7 +387,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - size = sizeof(int) * (ABS_MAX + 1); + size = sizeof(int) * ABS_CNT; memcpy(dev->absmax, user_dev->absmax, size); memcpy(dev->absmin, user_dev->absmin, size); memcpy(dev->absfuzz, user_dev->absfuzz, size); diff --git a/drivers/input/mouse/amimouse.c b/drivers/input/mouse/amimouse.c index a185ac78a42c..ff5f61a0fd3a 100644 --- a/drivers/input/mouse/amimouse.c +++ b/drivers/input/mouse/amimouse.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -34,10 +35,10 @@ MODULE_DESCRIPTION("Amiga mouse driver"); MODULE_LICENSE("GPL"); static int amimouse_lastx, amimouse_lasty; -static struct input_dev *amimouse_dev; -static irqreturn_t amimouse_interrupt(int irq, void *dummy) +static irqreturn_t amimouse_interrupt(int irq, void *data) { + struct input_dev *dev = data; unsigned short joy0dat, potgor; int nx, ny, dx, dy; @@ -59,14 +60,14 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy) potgor = amiga_custom.potgor; - input_report_rel(amimouse_dev, REL_X, dx); - input_report_rel(amimouse_dev, REL_Y, dy); + input_report_rel(dev, REL_X, dx); + input_report_rel(dev, REL_Y, dy); - input_report_key(amimouse_dev, BTN_LEFT, ciaa.pra & 0x40); - input_report_key(amimouse_dev, BTN_MIDDLE, potgor & 0x0100); - input_report_key(amimouse_dev, BTN_RIGHT, potgor & 0x0400); + input_report_key(dev, BTN_LEFT, ciaa.pra & 0x40); + input_report_key(dev, BTN_MIDDLE, potgor & 0x0100); + input_report_key(dev, BTN_RIGHT, potgor & 0x0400); - input_sync(amimouse_dev); + input_sync(dev); return IRQ_HANDLED; } @@ -74,63 +75,90 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy) static int amimouse_open(struct input_dev *dev) { unsigned short joy0dat; + int error; joy0dat = amiga_custom.joy0dat; amimouse_lastx = joy0dat & 0xff; amimouse_lasty = joy0dat >> 8; - if (request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", amimouse_interrupt)) { - printk(KERN_ERR "amimouse.c: Can't allocate irq %d\n", IRQ_AMIGA_VERTB); - return -EBUSY; - } + error = request_irq(IRQ_AMIGA_VERTB, amimouse_interrupt, 0, "amimouse", + dev); + if (error) + dev_err(&dev->dev, "Can't allocate irq %d\n", IRQ_AMIGA_VERTB); - return 0; + return error; } static void amimouse_close(struct input_dev *dev) { - free_irq(IRQ_AMIGA_VERTB, amimouse_interrupt); + free_irq(IRQ_AMIGA_VERTB, dev); } -static int __init amimouse_init(void) +static int __init amimouse_probe(struct platform_device *pdev) { int err; + struct input_dev *dev; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_MOUSE)) - return -ENODEV; - - amimouse_dev = input_allocate_device(); - if (!amimouse_dev) + dev = input_allocate_device(); + if (!dev) return -ENOMEM; - amimouse_dev->name = "Amiga mouse"; - amimouse_dev->phys = 
"amimouse/input0"; - amimouse_dev->id.bustype = BUS_AMIGA; - amimouse_dev->id.vendor = 0x0001; - amimouse_dev->id.product = 0x0002; - amimouse_dev->id.version = 0x0100; + dev->name = pdev->name; + dev->phys = "amimouse/input0"; + dev->id.bustype = BUS_AMIGA; + dev->id.vendor = 0x0001; + dev->id.product = 0x0002; + dev->id.version = 0x0100; - amimouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); - amimouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); - amimouse_dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | + dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); + dev->keybit[BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); - amimouse_dev->open = amimouse_open; - amimouse_dev->close = amimouse_close; + dev->open = amimouse_open; + dev->close = amimouse_close; + dev->dev.parent = &pdev->dev; - err = input_register_device(amimouse_dev); + err = input_register_device(dev); if (err) { - input_free_device(amimouse_dev); + input_free_device(dev); return err; } + platform_set_drvdata(pdev, dev); + return 0; } -static void __exit amimouse_exit(void) +static int __exit amimouse_remove(struct platform_device *pdev) { - input_unregister_device(amimouse_dev); + struct input_dev *dev = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + input_unregister_device(dev); + return 0; +} + +static struct platform_driver amimouse_driver = { + .remove = __exit_p(amimouse_remove), + .driver = { + .name = "amiga-mouse", + .owner = THIS_MODULE, + }, +}; + +static int __init amimouse_init(void) +{ + return platform_driver_probe(&amimouse_driver, amimouse_probe); } module_init(amimouse_init); + +static void __exit amimouse_exit(void) +{ + platform_driver_unregister(&amimouse_driver); +} + module_exit(amimouse_exit); + +MODULE_ALIAS("platform:amiga-mouse"); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index b9f58ca82fd1..6703c6b9800a 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -590,4 +590,17 @@ config TOUCHSCREEN_PCAP To compile this driver as a module, choose M here: the module will be called pcap_ts. + +config TOUCHSCREEN_TPS6507X + tristate "TPS6507x based touchscreens" + depends on I2C + help + Say Y here if you have a TPS6507x based touchscreen + controller. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called tps6507x_ts. 
+ endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 8ad36eef90a2..497964a7a214 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o +obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 532279cda0e4..634f6f6b9b13 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1163,8 +1163,8 @@ static int __devinit ads7846_probe(struct spi_device *spi) ts->reg = regulator_get(&spi->dev, "vcc"); if (IS_ERR(ts->reg)) { - dev_err(&spi->dev, "unable to get regulator: %ld\n", - PTR_ERR(ts->reg)); + err = PTR_ERR(ts->reg); + dev_err(&spi->dev, "unable to get regulator: %ld\n", err); goto err_free_gpio; } diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c index e0b7c834111d..ac5d0f9b0cb1 100644 --- a/drivers/input/touchscreen/s3c2410_ts.c +++ b/drivers/input/touchscreen/s3c2410_ts.c @@ -413,6 +413,8 @@ static struct dev_pm_ops s3c_ts_pmops = { #endif static struct platform_device_id s3cts_driver_ids[] = { + { "s3c2410-ts", 0 }, + { "s3c2440-ts", 0 }, { "s3c64xx-ts", FEAT_PEN_IRQ }, { } }; diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c new file mode 100644 index 000000000000..5de80a1a730b --- /dev/null +++ b/drivers/input/touchscreen/tps6507x-ts.c @@ -0,0 +1,400 @@ +/* + * drivers/input/touchscreen/tps6507x_ts.c + * + * Touchscreen driver for the tps6507x chip. + * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * Credits: + * + * Using code from tsc2007, MtekVision Co., Ltd. + * + * For licencing details see kernel-base/COPYING + * + * TPS65070, TPS65073, TPS650731, and TPS650732 support + * 10 bit touch screen interface. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */ +#define TPS_DEFAULT_MIN_PRESSURE 0x30 +#define MAX_10BIT ((1 << 10) - 1) + +#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \ + TPS6507X_ADCONFIG_START_CONVERSION | \ + TPS6507X_ADCONFIG_INPUT_REAL_TSC) +#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC) + +struct ts_event { + u16 x; + u16 y; + u16 pressure; +}; + +struct tps6507x_ts { + struct input_dev *input_dev; + struct device *dev; + char phys[32]; + struct workqueue_struct *wq; + struct delayed_work work; + unsigned polling; /* polling is active */ + struct ts_event tc; + struct tps6507x_dev *mfd; + u16 model; + unsigned pendown; + int irq; + void (*clear_penirq)(void); + unsigned long poll_period; /* ms */ + u16 min_pressure; + int vref; /* non-zero to leave vref on */ +}; + +static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data) +{ + int err; + + err = tsc->mfd->read_dev(tsc->mfd, reg, 1, data); + + if (err) + return err; + + return 0; +} + +static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data) +{ + return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data); +} + +static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc, + u8 tsc_mode, u16 *value) +{ + s32 ret; + u8 adc_status; + u8 result; + + /* Route input signal to A/D converter */ + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode); + if (ret) { + dev_err(tsc->dev, "TSC mode read failed\n"); + goto err; + } + + /* Start A/D conversion */ + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, + TPS6507X_ADCONFIG_CONVERT_TS); + if (ret) { + dev_err(tsc->dev, "ADC config write failed\n"); + return ret; + } + + do { + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG, + &adc_status); + if (ret) { + dev_err(tsc->dev, "ADC config read failed\n"); + goto err; + } + } while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION); + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result); + if (ret) { + dev_err(tsc->dev, "ADC result 2 read failed\n"); + goto err; + } + + *value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8; + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result); + if (ret) { + dev_err(tsc->dev, "ADC result 1 read failed\n"); + goto err; + } + + *value |= result; + + dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value); + +err: + return ret; +} + +/* Need to call tps6507x_adc_standby() after using A/D converter for the + * touch screen interrupt to work properly. 
+ */ + +static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc) +{ + s32 ret; + s32 loops = 0; + u8 val; + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG, + TPS6507X_ADCONFIG_INPUT_TSC); + if (ret) + return ret; + + ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, + TPS6507X_TSCMODE_STANDBY); + if (ret) + return ret; + + ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); + if (ret) + return ret; + + while (val & TPS6507X_REG_TSC_INT) { + mdelay(10); + ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val); + if (ret) + return ret; + loops++; + } + + return ret; +} + +static void tps6507x_ts_handler(struct work_struct *work) +{ + struct tps6507x_ts *tsc = container_of(work, + struct tps6507x_ts, work.work); + struct input_dev *input_dev = tsc->input_dev; + int pendown; + int schd; + int poll = 0; + s32 ret; + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE, + &tsc->tc.pressure); + if (ret) + goto done; + + pendown = tsc->tc.pressure > tsc->min_pressure; + + if (unlikely(!pendown && tsc->pendown)) { + dev_dbg(tsc->dev, "UP\n"); + input_report_key(input_dev, BTN_TOUCH, 0); + input_report_abs(input_dev, ABS_PRESSURE, 0); + input_sync(input_dev); + tsc->pendown = 0; + } + + if (pendown) { + + if (!tsc->pendown) { + dev_dbg(tsc->dev, "DOWN\n"); + input_report_key(input_dev, BTN_TOUCH, 1); + } else + dev_dbg(tsc->dev, "still down\n"); + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION, + &tsc->tc.x); + if (ret) + goto done; + + ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION, + &tsc->tc.y); + if (ret) + goto done; + + input_report_abs(input_dev, ABS_X, tsc->tc.x); + input_report_abs(input_dev, ABS_Y, tsc->tc.y); + input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure); + input_sync(input_dev); + tsc->pendown = 1; + poll = 1; + } + +done: + /* always poll if not using interrupts */ + poll = 1; + + if (poll) { + schd = queue_delayed_work(tsc->wq, &tsc->work, + tsc->poll_period * HZ / 1000); + if (schd) + tsc->polling = 1; + else { + tsc->polling = 0; + dev_err(tsc->dev, "re-schedule failed"); + } + } else + tsc->polling = 0; + + ret = tps6507x_adc_standby(tsc); +} + +static int tps6507x_ts_probe(struct platform_device *pdev) +{ + int error; + struct tps6507x_ts *tsc; + struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); + struct touchscreen_init_data *init_data; + struct input_dev *input_dev; + struct tps6507x_board *tps_board; + int schd; + + /** + * tps_board points to pmic related constants + * coming from the board-evm file. + */ + + tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data; + + if (!tps_board) { + dev_err(tps6507x_dev->dev, + "Could not find tps6507x platform data\n"); + return -EIO; + } + + /** + * init_data points to array of regulator_init structures + * coming from the board-evm file. 
+ */ + + init_data = tps_board->tps6507x_ts_init_data; + + tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL); + if (!tsc) { + dev_err(tps6507x_dev->dev, "failed to allocate driver data\n"); + error = -ENOMEM; + goto err0; + } + + tps6507x_dev->ts = tsc; + tsc->mfd = tps6507x_dev; + tsc->dev = tps6507x_dev->dev; + input_dev = input_allocate_device(); + if (!input_dev) { + dev_err(tsc->dev, "Failed to allocate input device.\n"); + error = -ENOMEM; + goto err1; + } + + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); + input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + + input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0); + input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0); + + input_dev->name = "TPS6507x Touchscreen"; + input_dev->id.bustype = BUS_I2C; + input_dev->dev.parent = tsc->dev; + + snprintf(tsc->phys, sizeof(tsc->phys), + "%s/input0", dev_name(tsc->dev)); + input_dev->phys = tsc->phys; + + dev_dbg(tsc->dev, "device: %s\n", input_dev->phys); + + input_set_drvdata(input_dev, tsc); + + tsc->input_dev = input_dev; + + INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler); + tsc->wq = create_workqueue("TPS6507x Touchscreen"); + + if (init_data) { + tsc->poll_period = init_data->poll_period; + tsc->vref = init_data->vref; + tsc->min_pressure = init_data->min_pressure; + input_dev->id.vendor = init_data->vendor; + input_dev->id.product = init_data->product; + input_dev->id.version = init_data->version; + } else { + tsc->poll_period = TSC_DEFAULT_POLL_PERIOD; + tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE; + } + + error = tps6507x_adc_standby(tsc); + if (error) + goto err2; + + error = input_register_device(input_dev); + if (error) + goto err2; + + schd = queue_delayed_work(tsc->wq, &tsc->work, + tsc->poll_period * HZ / 1000); + + if (schd) + tsc->polling = 1; + else { + tsc->polling = 0; + dev_err(tsc->dev, "schedule failed"); + goto err2; + } + + return 0; + +err2: + cancel_delayed_work(&tsc->work); + flush_workqueue(tsc->wq); + destroy_workqueue(tsc->wq); + tsc->wq = 0; + input_free_device(input_dev); +err1: + kfree(tsc); + tps6507x_dev->ts = NULL; +err0: + return error; +} + +static int __devexit tps6507x_ts_remove(struct platform_device *pdev) +{ + struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); + struct tps6507x_ts *tsc = tps6507x_dev->ts; + struct input_dev *input_dev = tsc->input_dev; + + if (!tsc) + return 0; + + cancel_delayed_work(&tsc->work); + flush_workqueue(tsc->wq); + destroy_workqueue(tsc->wq); + tsc->wq = 0; + + input_free_device(input_dev); + + tps6507x_dev->ts = NULL; + kfree(tsc); + + return 0; +} + +static struct platform_driver tps6507x_ts_driver = { + .driver = { + .name = "tps6507x-ts", + .owner = THIS_MODULE, + }, + .probe = tps6507x_ts_probe, + .remove = __devexit_p(tps6507x_ts_remove), +}; + +static int __init tps6507x_ts_init(void) +{ + return platform_driver_register(&tps6507x_ts_driver); +} +module_init(tps6507x_ts_init); + +static void __exit tps6507x_ts_exit(void) +{ + platform_driver_unregister(&tps6507x_ts_driver); +} +module_exit(tps6507x_ts_exit); + +MODULE_AUTHOR("Todd Fischer "); +MODULE_DESCRIPTION("TPS6507x - TouchScreen driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:tps6507x-tsc"); diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index 29a8bbf3f086..567d57215c28 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ 
b/drivers/input/touchscreen/usbtouchscreen.c @@ -857,6 +857,11 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt) if ((pkt[0] & 0xe0) != 0xe0) return 0; + if (be16_to_cpu(packet->data_len) > 0xff) + packet->data_len = cpu_to_be16(be16_to_cpu(packet->data_len) - 0x100); + if (be16_to_cpu(packet->x_len) > 0xff) + packet->x_len = cpu_to_be16(be16_to_cpu(packet->x_len) - 0x80); + /* send ACK */ ret = usb_submit_urb(priv->ack, GFP_ATOMIC); @@ -1112,7 +1117,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { #ifdef CONFIG_TOUCHSCREEN_USB_NEXIO [DEVTYPE_NEXIO] = { - .rept_size = 128, + .rept_size = 1024, .irq_always = true, .read_data = nexio_read_data, .init = nexio_init, diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index b3b7e2879bac..8700474747e8 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -97,8 +97,10 @@ static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val) hw->name, __func__, reg, val); spin_lock(&hw->ctrl_lock); - if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) + if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) { + spin_unlock(&hw->ctrl_lock); return 1; + } buf = &hw->ctrl_buff[hw->ctrl_in_idx]; buf->hfcs_reg = reg; buf->reg_val = val; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 0a3553df065f..54ae71a907f9 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -320,12 +320,12 @@ inittiger(struct tiger_hw *card) return -ENOMEM; } for (i = 0; i < 2; i++) { - card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL); + card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC); if (!card->bc[i].hsbuf) { pr_info("%s: no B%d send buffer\n", card->name, i + 1); return -ENOMEM; } - card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL); + card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC); if (!card->bc[i].hrbuf) { pr_info("%s: no B%d recv buffer\n", card->name, i + 1); return -ENOMEM; diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index c3243c913ec0..81048b8ed8ad 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -98,8 +98,6 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off) if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, filep, buf, (int)count, off); - if (*off != filep->f_pos) - return -ESPIPE; if (list_empty(&dev->expired) && (dev->work == 0)) { if (filep->f_flags & O_NONBLOCK) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 505eb64c329c..81bf25e67ce1 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -21,7 +21,7 @@ comment "LED drivers" config LEDS_88PM860X tristate "LED Support for Marvell 88PM860x PMIC" - depends on LEDS_CLASS && MFD_88PM860X + depends on MFD_88PM860X help This option enables support for on-chip LED drivers found on Marvell Semiconductor 88PM8606 PMIC. @@ -67,6 +67,16 @@ config LEDS_NET48XX This option enables support for the Soekris net4801 and net4826 error LED. +config LEDS_NET5501 + tristate "LED Support for Soekris net5501 series Error LED" + depends on LEDS_TRIGGERS + depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535 + select LEDS_TRIGGER_DEFAULT_ON + default n + help + Add support for the Soekris net5501 board (detection, error led + and GPIO). 
+ config LEDS_FSG tristate "LED Support for the Freecom FSG-3" depends on MACH_FSG @@ -285,6 +295,13 @@ config LEDS_DELL_NETBOOKS This adds support for the Latitude 2100 and similar notebooks that have an external LED. +config LEDS_MC13783 + tristate "LED Support for MC13783 PMIC" + depends on MFD_MC13783 + help + This option enable support for on-chip LED drivers found + on Freescale Semiconductor MC13783 PMIC. + config LEDS_TRIGGERS bool "LED Trigger support" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 0cd8b9957380..2493de499374 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o +obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o obj-$(CONFIG_LEDS_H1940) += leds-h1940.o @@ -35,6 +36,7 @@ obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o +obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 69e7d86a5143..260660076507 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -74,7 +74,7 @@ static ssize_t led_max_brightness_show(struct device *dev, static struct device_attribute led_class_attrs[] = { __ATTR(brightness, 0644, led_brightness_show, led_brightness_store), - __ATTR(max_brightness, 0644, led_max_brightness_show, NULL), + __ATTR(max_brightness, 0444, led_max_brightness_show, NULL), #ifdef CONFIG_LEDS_TRIGGERS __ATTR(trigger, 0644, led_trigger_show, led_trigger_store), #endif diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 16a60c06c96c..b7677106cff8 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -256,8 +256,10 @@ static int pm860x_led_probe(struct platform_device *pdev) if (pdev->dev.parent->platform_data) { pm860x_pdata = pdev->dev.parent->platform_data; pdata = pm860x_pdata->led; - } else - pdata = NULL; + } else { + dev_err(&pdev->dev, "missing platform data\n"); + return -EINVAL; + } data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL); if (data == NULL) @@ -268,8 +270,11 @@ static int pm860x_led_probe(struct platform_device *pdev) data->i2c = (chip->id == CHIP_PM8606) ? 
chip->client : chip->companion; data->iset = pdata->iset; data->port = __check_device(pdata, data->name); - if (data->port < 0) + if (data->port < 0) { + dev_err(&pdev->dev, "check device failed\n"); + kfree(data); return -EINVAL; + } data->current_brightness = 0; data->cdev.name = data->name; diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 6d94b0b9979c..cc22eeefa10b 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -26,7 +26,8 @@ struct gpio_led_data { u8 new_level; u8 can_sleep; u8 active_low; - int (*platform_gpio_blink_set)(unsigned gpio, + u8 blinking; + int (*platform_gpio_blink_set)(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off); }; @@ -35,7 +36,13 @@ static void gpio_led_work(struct work_struct *work) struct gpio_led_data *led_dat = container_of(work, struct gpio_led_data, work); - gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); + if (led_dat->blinking) { + led_dat->platform_gpio_blink_set(led_dat->gpio, + led_dat->new_level, + NULL, NULL); + led_dat->blinking = 0; + } else + gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); } static void gpio_led_set(struct led_classdev *led_cdev, @@ -60,8 +67,14 @@ static void gpio_led_set(struct led_classdev *led_cdev, if (led_dat->can_sleep) { led_dat->new_level = level; schedule_work(&led_dat->work); - } else - gpio_set_value(led_dat->gpio, level); + } else { + if (led_dat->blinking) { + led_dat->platform_gpio_blink_set(led_dat->gpio, level, + NULL, NULL); + led_dat->blinking = 0; + } else + gpio_set_value(led_dat->gpio, level); + } } static int gpio_blink_set(struct led_classdev *led_cdev, @@ -70,12 +83,14 @@ static int gpio_blink_set(struct led_classdev *led_cdev, struct gpio_led_data *led_dat = container_of(led_cdev, struct gpio_led_data, cdev); - return led_dat->platform_gpio_blink_set(led_dat->gpio, delay_on, delay_off); + led_dat->blinking = 1; + return led_dat->platform_gpio_blink_set(led_dat->gpio, GPIO_LED_BLINK, + delay_on, delay_off); } static int __devinit create_gpio_led(const struct gpio_led *template, struct gpio_led_data *led_dat, struct device *parent, - int (*blink_set)(unsigned, unsigned long *, unsigned long *)) + int (*blink_set)(unsigned, int, unsigned long *, unsigned long *)) { int ret, state; @@ -97,6 +112,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, led_dat->gpio = template->gpio; led_dat->can_sleep = gpio_cansleep(template->gpio); led_dat->active_low = template->active_low; + led_dat->blinking = 0; if (blink_set) { led_dat->platform_gpio_blink_set = blink_set; led_dat->cdev.blink_set = gpio_blink_set; @@ -113,7 +129,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); if (ret < 0) goto err; - + INIT_WORK(&led_dat->work, gpio_led_work); ret = led_classdev_register(parent, &led_dat->cdev); diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c index 8d5ecceba181..932a58da76c4 100644 --- a/drivers/leds/leds-lp3944.c +++ b/drivers/leds/leds-lp3944.c @@ -379,6 +379,7 @@ static int __devinit lp3944_probe(struct i2c_client *client, { struct lp3944_platform_data *lp3944_pdata = client->dev.platform_data; struct lp3944_data *data; + int err; if (lp3944_pdata == NULL) { dev_err(&client->dev, "no platform data\n"); @@ -401,9 +402,13 @@ static int __devinit lp3944_probe(struct i2c_client *client, mutex_init(&data->lock); - dev_info(&client->dev, "lp3944 enabled\n"); + err = 
lp3944_configure(client, data, lp3944_pdata); + if (err < 0) { + kfree(data); + return err; + } - lp3944_configure(client, data, lp3944_pdata); + dev_info(&client->dev, "lp3944 enabled\n"); return 0; } diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c new file mode 100644 index 000000000000..f05bb08d0f09 --- /dev/null +++ b/drivers/leds/leds-mc13783.c @@ -0,0 +1,403 @@ +/* + * LEDs driver for Freescale MC13783 + * + * Copyright (C) 2010 Philippe Rétornaz + * + * Based on leds-da903x: + * Copyright (C) 2008 Compulab, Ltd. + * Mike Rapoport + * + * Copyright (C) 2006-2008 Marvell International Ltd. + * Eric Miao + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct mc13783_led { + struct led_classdev cdev; + struct work_struct work; + struct mc13783 *master; + enum led_brightness new_brightness; + int id; +}; + +#define MC13783_REG_LED_CONTROL_0 51 +#define MC13783_LED_C0_ENABLE_BIT (1 << 0) +#define MC13783_LED_C0_TRIODE_MD_BIT (1 << 7) +#define MC13783_LED_C0_TRIODE_AD_BIT (1 << 8) +#define MC13783_LED_C0_TRIODE_KP_BIT (1 << 9) +#define MC13783_LED_C0_BOOST_BIT (1 << 10) +#define MC13783_LED_C0_ABMODE_MASK 0x7 +#define MC13783_LED_C0_ABMODE 11 +#define MC13783_LED_C0_ABREF_MASK 0x3 +#define MC13783_LED_C0_ABREF 14 + +#define MC13783_REG_LED_CONTROL_1 52 +#define MC13783_LED_C1_TC1HALF_BIT (1 << 18) + +#define MC13783_REG_LED_CONTROL_2 53 +#define MC13783_LED_C2_BL_P_MASK 0xf +#define MC13783_LED_C2_MD_P 9 +#define MC13783_LED_C2_AD_P 13 +#define MC13783_LED_C2_KP_P 17 +#define MC13783_LED_C2_BL_C_MASK 0x7 +#define MC13783_LED_C2_MD_C 0 +#define MC13783_LED_C2_AD_C 3 +#define MC13783_LED_C2_KP_C 6 + +#define MC13783_REG_LED_CONTROL_3 54 +#define MC13783_LED_C3_TC_P 6 +#define MC13783_LED_C3_TC_P_MASK 0x1f + +#define MC13783_REG_LED_CONTROL_4 55 +#define MC13783_REG_LED_CONTROL_5 56 + +#define MC13783_LED_Cx_PERIOD 21 +#define MC13783_LED_Cx_PERIOD_MASK 0x3 +#define MC13783_LED_Cx_SLEWLIM_BIT (1 << 23) +#define MC13783_LED_Cx_TRIODE_TC_BIT (1 << 23) +#define MC13783_LED_Cx_TC_C_MASK 0x3 + +static void mc13783_led_work(struct work_struct *work) +{ + struct mc13783_led *led = container_of(work, struct mc13783_led, work); + int reg = 0; + int mask = 0; + int value = 0; + int bank, off, shift; + + switch (led->id) { + case MC13783_LED_MD: + reg = MC13783_REG_LED_CONTROL_2; + mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_MD_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_MD_P; + break; + case MC13783_LED_AD: + reg = MC13783_REG_LED_CONTROL_2; + mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_AD_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_AD_P; + break; + case MC13783_LED_KP: + reg = MC13783_REG_LED_CONTROL_2; + mask = MC13783_LED_C2_BL_P_MASK << MC13783_LED_C2_KP_P; + value = (led->new_brightness >> 4) << MC13783_LED_C2_KP_P; + break; + case MC13783_LED_R1: + case MC13783_LED_G1: + case MC13783_LED_B1: + case MC13783_LED_R2: + case MC13783_LED_G2: + case MC13783_LED_B2: + case MC13783_LED_R3: + case MC13783_LED_G3: + case MC13783_LED_B3: + off = led->id - MC13783_LED_R1; + bank = off/3; + reg = MC13783_REG_LED_CONTROL_3 + off/3; + shift = (off - bank * 3) * 5 + MC13783_LED_C3_TC_P; + value = (led->new_brightness >> 3) << shift; + mask = MC13783_LED_C3_TC_P_MASK << shift; + break; + } + + mc13783_lock(led->master); 
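	/*
	 * The register update below is performed from a workqueue because
	 * mc13783_lock() and mc13783_reg_rmw() can sleep; mc13783_led_set()
	 * only records the requested brightness and schedules this work.
	 */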
+ + mc13783_reg_rmw(led->master, reg, mask, value); + + mc13783_unlock(led->master); +} + +static void mc13783_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct mc13783_led *led; + + led = container_of(led_cdev, struct mc13783_led, cdev); + led->new_brightness = value; + schedule_work(&led->work); +} + +static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current) +{ + int shift = 0; + int mask = 0; + int value = 0; + int reg = 0; + int ret, bank; + + switch (led->id) { + case MC13783_LED_MD: + shift = MC13783_LED_C2_MD_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_AD: + shift = MC13783_LED_C2_AD_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_KP: + shift = MC13783_LED_C2_KP_C; + mask = MC13783_LED_C2_BL_C_MASK; + value = max_current & MC13783_LED_C2_BL_C_MASK; + reg = MC13783_REG_LED_CONTROL_2; + break; + case MC13783_LED_R1: + case MC13783_LED_G1: + case MC13783_LED_B1: + case MC13783_LED_R2: + case MC13783_LED_G2: + case MC13783_LED_B2: + case MC13783_LED_R3: + case MC13783_LED_G3: + case MC13783_LED_B3: + bank = (led->id - MC13783_LED_R1)/3; + reg = MC13783_REG_LED_CONTROL_3 + bank; + shift = ((led->id - MC13783_LED_R1) - bank * 3) * 2; + mask = MC13783_LED_Cx_TC_C_MASK; + value = max_current & MC13783_LED_Cx_TC_C_MASK; + break; + } + + mc13783_lock(led->master); + + ret = mc13783_reg_rmw(led->master, reg, mask << shift, + value << shift); + + mc13783_unlock(led->master); + return ret; +} + +static int __devinit mc13783_leds_prepare(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); + int ret = 0; + int reg = 0; + + mc13783_lock(dev); + + if (pdata->flags & MC13783_LED_TC1HALF) + reg |= MC13783_LED_C1_TC1HALF_BIT; + + if (pdata->flags & MC13783_LED_SLEWLIMTC) + reg |= MC13783_LED_Cx_SLEWLIM_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, reg); + if (ret) + goto out; + + reg = (pdata->bl_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_SLEWLIMBL) + reg |= MC13783_LED_Cx_SLEWLIM_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, reg); + if (ret) + goto out; + + reg = (pdata->tc1_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC1) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, reg); + if (ret) + goto out; + + reg = (pdata->tc2_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC2) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, reg); + if (ret) + goto out; + + reg = (pdata->tc3_period & MC13783_LED_Cx_PERIOD_MASK) << + MC13783_LED_Cx_PERIOD; + + if (pdata->flags & MC13783_LED_TRIODE_TC3) + reg |= MC13783_LED_Cx_TRIODE_TC_BIT;; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, reg); + if (ret) + goto out; + + reg = MC13783_LED_C0_ENABLE_BIT; + if (pdata->flags & MC13783_LED_TRIODE_MD) + reg |= MC13783_LED_C0_TRIODE_MD_BIT; + if (pdata->flags & MC13783_LED_TRIODE_AD) + reg |= MC13783_LED_C0_TRIODE_AD_BIT; + if (pdata->flags & MC13783_LED_TRIODE_KP) + reg |= MC13783_LED_C0_TRIODE_KP_BIT; + if (pdata->flags & 
MC13783_LED_BOOST_EN) + reg |= MC13783_LED_C0_BOOST_BIT; + + reg |= (pdata->abmode & MC13783_LED_C0_ABMODE_MASK) << + MC13783_LED_C0_ABMODE; + reg |= (pdata->abref & MC13783_LED_C0_ABREF_MASK) << + MC13783_LED_C0_ABREF; + + ret = mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, reg); + +out: + mc13783_unlock(dev); + return ret; +} + +static int __devinit mc13783_led_probe(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783_led_platform_data *led_cur; + struct mc13783_led *led, *led_dat; + int ret, i; + int init_led = 0; + + if (pdata == NULL) { + dev_err(&pdev->dev, "missing platform data\n"); + return -ENODEV; + } + + if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) { + dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds); + return -EINVAL; + } + + led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); + if (led == NULL) { + dev_err(&pdev->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + + ret = mc13783_leds_prepare(pdev); + if (ret) { + dev_err(&pdev->dev, "unable to init led driver\n"); + goto err_free; + } + + for (i = 0; i < pdata->num_leds; i++) { + led_dat = &led[i]; + led_cur = &pdata->led[i]; + + if (led_cur->id > MC13783_LED_MAX || led_cur->id < 0) { + dev_err(&pdev->dev, "invalid id %d\n", led_cur->id); + ret = -EINVAL; + goto err_register; + } + + if (init_led & (1 << led_cur->id)) { + dev_err(&pdev->dev, "led %d already initialized\n", + led_cur->id); + ret = -EINVAL; + goto err_register; + } + + init_led |= 1 << led_cur->id; + led_dat->cdev.name = led_cur->name; + led_dat->cdev.default_trigger = led_cur->default_trigger; + led_dat->cdev.brightness_set = mc13783_led_set; + led_dat->cdev.brightness = LED_OFF; + led_dat->id = led_cur->id; + led_dat->master = dev_get_drvdata(pdev->dev.parent); + + INIT_WORK(&led_dat->work, mc13783_led_work); + + ret = led_classdev_register(pdev->dev.parent, &led_dat->cdev); + if (ret) { + dev_err(&pdev->dev, "failed to register led %d\n", + led_dat->id); + goto err_register; + } + + ret = mc13783_led_setup(led_dat, led_cur->max_current); + if (ret) { + dev_err(&pdev->dev, "unable to init led %d\n", + led_dat->id); + i++; + goto err_register; + } + } + + platform_set_drvdata(pdev, led); + return 0; + +err_register: + for (i = i - 1; i >= 0; i--) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + +err_free: + kfree(led); + return ret; +} + +static int __devexit mc13783_led_remove(struct platform_device *pdev) +{ + struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct mc13783_led *led = platform_get_drvdata(pdev); + struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); + int i; + + for (i = 0; i < pdata->num_leds; i++) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + + mc13783_lock(dev); + + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_0, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_1, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_2, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_3, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_4, 0); + mc13783_reg_write(dev, MC13783_REG_LED_CONTROL_5, 0); + + mc13783_unlock(dev); + + kfree(led); + return 0; +} + +static struct platform_driver mc13783_led_driver = { + .driver = { + .name = "mc13783-led", + .owner = THIS_MODULE, + }, + .probe = mc13783_led_probe, + .remove = __devexit_p(mc13783_led_remove), +}; + +static int __init mc13783_led_init(void) +{ + return 
platform_driver_register(&mc13783_led_driver); +} +module_init(mc13783_led_init); + +static void __exit mc13783_led_exit(void) +{ + platform_driver_unregister(&mc13783_led_driver); +} +module_exit(mc13783_led_exit); + +MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC"); +MODULE_AUTHOR("Philippe Retornaz "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:mc13783-led"); diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c new file mode 100644 index 000000000000..3063f591f0dc --- /dev/null +++ b/drivers/leds/leds-net5501.c @@ -0,0 +1,94 @@ +/* + * Soekris board support code + * + * Copyright (C) 2008-2009 Tower Technologies + * Written by Alessandro Zummo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct gpio_led net5501_leds[] = { + { + .name = "error", + .gpio = 6, + .default_trigger = "default-on", + }, +}; + +static struct gpio_led_platform_data net5501_leds_data = { + .num_leds = ARRAY_SIZE(net5501_leds), + .leds = net5501_leds, +}; + +static struct platform_device net5501_leds_dev = { + .name = "leds-gpio", + .id = -1, + .dev.platform_data = &net5501_leds_data, +}; + +static void __init init_net5501(void) +{ + platform_device_register(&net5501_leds_dev); +} + +struct soekris_board { + u16 offset; + char *sig; + u8 len; + void (*init)(void); +}; + +static struct soekris_board __initdata boards[] = { + { 0xb7b, "net5501", 7, init_net5501 }, /* net5501 v1.33/1.33c */ + { 0xb1f, "net5501", 7, init_net5501 }, /* net5501 v1.32i */ +}; + +static int __init soekris_init(void) +{ + int i; + unsigned char *rombase, *bios; + + if (!is_geode()) + return 0; + + rombase = ioremap(0xffff0000, 0xffff); + if (!rombase) { + printk(KERN_INFO "Soekris net5501 LED driver failed to get rombase"); + return 0; + } + + bios = rombase + 0x20; /* null terminated */ + + if (strncmp(bios, "comBIOS", 7)) + goto unmap; + + for (i = 0; i < ARRAY_SIZE(boards); i++) { + unsigned char *model = rombase + boards[i].offset; + + if (strncmp(model, boards[i].sig, boards[i].len) == 0) { + printk(KERN_INFO "Soekris %s: %s\n", model, bios); + + if (boards[i].init) + boards[i].init(); + break; + } + } + +unmap: + iounmap(rombase); + return 0; +} + +arch_initcall(soekris_init); diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 51477ec71391..a688293abd0b 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -534,7 +534,7 @@ static int __init nas_gpio_init(void) set_power_light_amber_noblink(); return 0; out_err: - for (; i >= 0; i--) + for (i--; i >= 0; i--) unregister_nasgpio_led(i); pci_unregister_driver(&nas_gpio_pci_driver); return ret; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9ea17d6c799b..d2c0f94fa37d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4645,7 +4645,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, kfree(percpu->scribble); pr_err("%s: failed memory allocation for cpu%ld\n", __func__, cpu); - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } break; case CPU_DEAD: diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index d33693c13368..c4b117f5fb70 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -186,14 +186,9 @@ static int i2o_cfg_parms(unsigned long 
arg, unsigned int type) if (!dev) return -ENXIO; - ops = kmalloc(kcmd.oplen, GFP_KERNEL); - if (!ops) - return -ENOMEM; - - if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) { - kfree(ops); - return -EFAULT; - } + ops = memdup_user(kcmd.opbuf, kcmd.oplen); + if (IS_ERR(ops)) + return PTR_ERR(ops); /* * It's possible to have a _very_ large table diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 405d2d5183cf..2c65a2c57294 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -566,7 +566,7 @@ out: return ret; } -static void __devexit device_irq_exit(struct pm860x_chip *chip) +static void device_irq_exit(struct pm860x_chip *chip) { if (chip->core_irq) free_irq(chip->core_irq, chip); @@ -703,7 +703,7 @@ out: return; } -int pm860x_device_init(struct pm860x_chip *chip, +int __devinit pm860x_device_init(struct pm860x_chip *chip, struct pm860x_platform_data *pdata) { chip->core_irq = 0; @@ -731,7 +731,7 @@ int pm860x_device_init(struct pm860x_chip *chip, return 0; } -void pm860x_device_exit(struct pm860x_chip *chip) +void __devexit pm860x_device_exit(struct pm860x_chip *chip) { device_irq_exit(chip); mfd_remove_devices(chip->dev); diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c index 4a6e7186334e..c933b64d1283 100644 --- a/drivers/mfd/88pm860x-i2c.c +++ b/drivers/mfd/88pm860x-i2c.c @@ -200,8 +200,8 @@ static int __devexit pm860x_remove(struct i2c_client *client) pm860x_device_exit(chip); i2c_unregister_device(chip->companion); - i2c_set_clientdata(chip->companion, NULL); i2c_set_clientdata(chip->client, NULL); + i2c_set_clientdata(client, NULL); kfree(chip); return 0; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 3c6a9860dd9c..9da0e504bbe9 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -2,8 +2,14 @@ # Multifunction miscellaneous devices # -menu "Multifunction device drivers" +menuconfig MFD_SUPPORT + bool "Multifunction device drivers" depends on HAS_IOMEM + default y + help + Configure MFD device drivers. + +if MFD_SUPPORT config MFD_CORE tristate @@ -116,6 +122,18 @@ config TPS65010 This driver can also be built as a module. If so, the module will be called tps65010. +config TPS6507X + tristate "TPS6507x Power Management / Touch Screen chips" + select MFD_CORE + depends on I2C + help + If you say yes here you get support for the TPS6507x series of + Power Management / Touch Screen chips. These include voltage + regulators, lithium ion/polymer battery charging, touch screen + and other features that are often used in portable devices. + This driver can also be built as a module. If so, the module + will be called tps6507x. + config MENELAUS bool "Texas Instruments TWL92330/Menelaus PM chip" depends on I2C=y && ARCH_OMAP2 @@ -159,6 +177,17 @@ config TWL4030_CODEC select MFD_CORE default n +config MFD_TC35892 + bool "Support Toshiba TC35892" + depends on I2C=y && GENERIC_HARDIRQS + select MFD_CORE + help + Support for the Toshiba TC35892 I/O Expander. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the + functionality of the device. + config MFD_TMIO bool default n @@ -351,9 +380,19 @@ config PCF50633_GPIO Say yes here if you want to include support GPIO for pins on the PCF50633 chip. +config ABX500_CORE + bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" + default y if ARCH_U300 + help + Say yes here if you have the ABX500 Mixed Signal IC family + chips. This core driver expose register access functions. 
+ Functionality specific drivers using these functions can + remain unchanged when IC changes. Binding of the functions to + actual register access is done by the IC core driver. + config AB3100_CORE bool "ST-Ericsson AB3100 Mixed Signal Circuit core functions" - depends on I2C=y + depends on I2C=y && ABX500_CORE default y if ARCH_U300 help Select this to enable the AB3100 Mixed Signal IC core @@ -381,15 +420,30 @@ config EZX_PCAP This enables the PCAP ASIC present on EZX Phones. This is needed for MMC, TouchScreen, Sound, USB, etc.. -config AB4500_CORE - tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip" - depends on SPI +config AB8500_CORE + bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" + depends on SPI=y && GENERIC_HARDIRQS + select MFD_CORE help - Select this option to enable access to AB4500 power management + Select this option to enable access to AB8500 power management chip. This connects to U8500 on the SSP/SPI bus and exports read/write functions for the devices to get access to this chip. This chip embeds various other multimedia funtionalities as well. +config AB3550_CORE + bool "ST-Ericsson AB3550 Mixed Signal Circuit core functions" + select MFD_CORE + depends on I2C=y && GENERIC_HARDIRQS && ABX500_CORE + help + Select this to enable the AB3550 Mixed Signal IC core + functionality. This connects to a AB3550 on the I2C bus + and expose a number of symbols needed for dependent devices + to read and write registers and subscribe to events from + this multi-functional IC. This is needed to use other features + of the AB3550 such as battery-backed RTC, charging control, + LEDs, vibrator, system power and temperature, power management + and ALSA sound. + config MFD_TIMBERDALE tristate "Support for the Timberdale FPGA" select MFD_CORE @@ -409,7 +463,26 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. -endmenu +config MFD_RDC321X + tristate "Support for RDC-R321x southbridge" + select MFD_CORE + depends on PCI + help + Say yes here if you want to have support for the RDC R-321x SoC + southbridge which provides access to GPIOs and Watchdog using the + southbridge PCI device configuration space. + +config MFD_JANZ_CMODIO + tristate "Support for Janz CMOD-IO PCI MODULbus Carrier Board" + select MFD_CORE + depends on PCI + help + This is the core driver for the Janz CMOD-IO PCI MODULbus + carrier board. This device is a PCI to MODULbus bridge which may + host many different types of MODULbus daughterboards, including + CAN and GPIO controllers. 
+ +endif # MFD_SUPPORT menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 87935f967aa0..fb503e77dc60 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o +obj-$(CONFIG_MFD_TC35892) += tc35892.o obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o @@ -29,6 +30,7 @@ obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o obj-$(CONFIG_TPS65010) += tps65010.o +obj-$(CONFIG_TPS6507X) += tps6507x.o obj-$(CONFIG_MENELAUS) += menelaus.o obj-$(CONFIG_TWL4030_CORE) += twl-core.o twl4030-irq.o twl6030-irq.o @@ -55,12 +57,17 @@ obj-$(CONFIG_PMIC_DA903X) += da903x.o max8925-objs := max8925-core.o max8925-i2c.o obj-$(CONFIG_MFD_MAX8925) += max8925.o -obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o +pcf50633-objs := pcf50633-core.o pcf50633-irq.o +obj-$(CONFIG_MFD_PCF50633) += pcf50633.o obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o +obj-$(CONFIG_ABX500_CORE) += abx500-core.o obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o -obj-$(CONFIG_AB4500_CORE) += ab4500-core.o +obj-$(CONFIG_AB3550_CORE) += ab3550-core.o +obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-spi.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o obj-$(CONFIG_PMIC_ADP5520) += adp5520.o -obj-$(CONFIG_LPC_SCH) += lpc_sch.o \ No newline at end of file +obj-$(CONFIG_LPC_SCH) += lpc_sch.o +obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o +obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index e4ca5909e424..53ebfee548fa 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include /* These are the only registers inside AB3100 used in this main file */ @@ -59,24 +59,15 @@ * The AB3100 is usually assigned address 0x48 (7-bit) * The chip is defined in the platform i2c_board_data section. 
*/ - -u8 ab3100_get_chip_type(struct ab3100 *ab3100) +static int ab3100_get_chip_id(struct device *dev) { - u8 chip = ABUNKNOWN; - - switch (ab3100->chip_id & 0xf0) { - case 0xa0: - chip = AB3000; - break; - case 0xc0: - chip = AB3100; - break; - } - return chip; + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return (int)ab3100->chip_id; } -EXPORT_SYMBOL(ab3100_get_chip_type); -int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) +static int ab3100_set_register_interruptible(struct ab3100 *ab3100, + u8 reg, u8 regval) { u8 regandval[2] = {reg, regval}; int err; @@ -108,8 +99,14 @@ int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval) mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_set_register_interruptible); +static int set_register_interruptible(struct device *dev, + u8 bank, u8 reg, u8 value) +{ + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return ab3100_set_register_interruptible(ab3100, reg, value); +} /* * The test registers exist at an I2C bus address up one @@ -148,8 +145,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100, return err; } - -int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) +static int ab3100_get_register_interruptible(struct ab3100 *ab3100, + u8 reg, u8 *regval) { int err; @@ -203,10 +200,16 @@ int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval) mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_get_register_interruptible); +static int get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value) +{ + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return ab3100_get_register_interruptible(ab3100, reg, value); +} -int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, +static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, u8 first_reg, u8 *regvals, u8 numregs) { int err; @@ -260,10 +263,17 @@ int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_get_register_page_interruptible); +static int get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return ab3100_get_register_page_interruptible(ab3100, + first_reg, regvals, numregs); +} -int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, +static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 andmask, u8 ormask) { u8 regandval[2] = {reg, 0}; @@ -331,8 +341,15 @@ int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, mutex_unlock(&ab3100->access_mutex); return err; } -EXPORT_SYMBOL(ab3100_mask_and_set_register_interruptible); +static int mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); + + return ab3100_mask_and_set_register_interruptible(ab3100, + reg, bitmask, (bitmask & bitvalues)); +} /* * Register a simple callback for handling any AB3100 events. 
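With the generic abx500_ops registered in ab3100_probe(), client drivers reach
the chip through the abx500 helpers instead of ab3100-specific exported
symbols. A minimal sketch of a register read through that interface (error
handling trimmed; "dev" is assumed to be a sub-device whose parent is the
ab3100 core):

	u8 value;
	int err;

	err = abx500_get_register_interruptible(dev, 0, AB3100_OTPP, &value);
	if (err)
		dev_err(dev, "register read failed\n");

The ab3100-otp driver below uses this same call.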
@@ -357,15 +374,27 @@ int ab3100_event_unregister(struct ab3100 *ab3100, EXPORT_SYMBOL(ab3100_event_unregister); -int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, - u32 *fatevent) +static int ab3100_event_registers_startup_state_get(struct device *dev, + u8 *event) { + struct ab3100 *ab3100 = dev_get_drvdata(dev->parent); if (!ab3100->startup_events_read) return -EAGAIN; /* Try again later */ - *fatevent = ab3100->startup_events; + memcpy(event, ab3100->startup_events, 3); return 0; } -EXPORT_SYMBOL(ab3100_event_registers_startup_state_get); + +static struct abx500_ops ab3100_ops = { + .get_chip_id = ab3100_get_chip_id, + .set_register = set_register_interruptible, + .get_register = get_register_interruptible, + .get_register_page = get_register_page_interruptible, + .set_register_page = NULL, + .mask_and_set_register = mask_and_set_register_interruptible, + .event_registers_startup_state_get = + ab3100_event_registers_startup_state_get, + .startup_irq_enabled = NULL, +}; /* * This is a threaded interrupt handler so we can make some @@ -390,7 +419,9 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data) event_regs[2]; if (!ab3100->startup_events_read) { - ab3100->startup_events = fatevent; + ab3100->startup_events[0] = event_regs[0]; + ab3100->startup_events[1] = event_regs[1]; + ab3100->startup_events[2] = event_regs[2]; ab3100->startup_events_read = true; } /* @@ -703,7 +734,8 @@ static int __init ab3100_setup(struct ab3100 *ab3100) dev_warn(ab3100->dev, "AB3100 P1E variant detected, " "forcing chip to 32KHz\n"); - err = ab3100_set_test_register_interruptible(ab3100, 0x02, 0x08); + err = ab3100_set_test_register_interruptible(ab3100, + 0x02, 0x08); } exit_no_setup: @@ -898,6 +930,10 @@ static int __init ab3100_probe(struct i2c_client *client, if (err) goto exit_no_irq; + err = abx500_register_ops(&client->dev, &ab3100_ops); + if (err) + goto exit_no_ops; + /* Set parent and a pointer back to the container in device data */ for (i = 0; i < ARRAY_SIZE(ab3100_platform_devs); i++) { ab3100_platform_devs[i]->dev.parent = @@ -915,11 +951,13 @@ static int __init ab3100_probe(struct i2c_client *client, return 0; + exit_no_ops: exit_no_irq: exit_no_setup: i2c_unregister_device(ab3100->testreg_client); exit_no_testreg_client: exit_no_detect: + i2c_set_clientdata(client, NULL); kfree(ab3100); return err; } @@ -941,6 +979,7 @@ static int __exit ab3100_remove(struct i2c_client *client) * their notifiers so deactivate IRQ */ free_irq(client->irq, ab3100); + i2c_set_clientdata(client, NULL); kfree(ab3100); return 0; } diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c index 2d14655fdebd..63d2b727ddbb 100644 --- a/drivers/mfd/ab3100-otp.c +++ b/drivers/mfd/ab3100-otp.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include @@ -30,7 +30,6 @@ /** * struct ab3100_otp * @dev containing device - * @ab3100 a pointer to the parent ab3100 device struct * @locked whether the OTP is locked, after locking, no more bits * can be changed but before locking it is still possible * to change bits from 1->0. 
@@ -49,7 +48,6 @@ */ struct ab3100_otp { struct device *dev; - struct ab3100 *ab3100; bool locked; u32 freq; bool paf; @@ -63,19 +61,19 @@ struct ab3100_otp { static int __init ab3100_otp_read(struct ab3100_otp *otp) { - struct ab3100 *ab = otp->ab3100; u8 otpval[8]; u8 otpp; int err; - err = ab3100_get_register_interruptible(ab, AB3100_OTPP, &otpp); + err = abx500_get_register_interruptible(otp->dev, 0, + AB3100_OTPP, &otpp); if (err) { dev_err(otp->dev, "unable to read OTPP register\n"); return err; } - err = ab3100_get_register_page_interruptible(ab, AB3100_OTP0, - otpval, 8); + err = abx500_get_register_page_interruptible(otp->dev, 0, + AB3100_OTP0, otpval, 8); if (err) { dev_err(otp->dev, "unable to read OTP register page\n"); return err; @@ -197,7 +195,6 @@ static int __init ab3100_otp_probe(struct platform_device *pdev) otp->dev = &pdev->dev; /* Replace platform data coming in with a local struct */ - otp->ab3100 = platform_get_drvdata(pdev); platform_set_drvdata(pdev, otp); err = ab3100_otp_read(otp); diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c new file mode 100644 index 000000000000..1060f8e1c40a --- /dev/null +++ b/drivers/mfd/ab3550-core.c @@ -0,0 +1,1401 @@ +/* + * Copyright (C) 2007-2010 ST-Ericsson + * License terms: GNU General Public License (GPL) version 2 + * Low-level core for exclusive access to the AB3550 IC on the I2C bus + * and some basic chip-configuration. + * Author: Bengt Jonsson + * Author: Mattias Nilsson + * Author: Mattias Wallin + * Author: Rickard Andersson + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AB3550_NAME_STRING "ab3550" +#define AB3550_ID_FORMAT_STRING "AB3550 %s" +#define AB3550_NUM_BANKS 2 +#define AB3550_NUM_EVENT_REG 5 + +/* These are the only registers inside AB3550 used in this main file */ + +/* Chip ID register */ +#define AB3550_CID_REG 0x20 + +/* Interrupt event registers */ +#define AB3550_EVENT_BANK 0 +#define AB3550_EVENT_REG 0x22 + +/* Read/write operation values. */ +#define AB3550_PERM_RD (0x01) +#define AB3550_PERM_WR (0x02) + +/* Read/write permissions. 
*/ +#define AB3550_PERM_RO (AB3550_PERM_RD) +#define AB3550_PERM_RW (AB3550_PERM_RD | AB3550_PERM_WR) + +/** + * struct ab3550 + * @access_mutex: lock out concurrent accesses to the AB registers + * @i2c_client: I2C client for this chip + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @mask_work: a worker for writing to mask registers + * @event_lock: a lock to protect the event_mask + * @event_mask: a local copy of the mask event registers + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + */ +struct ab3550 { + struct mutex access_mutex; + struct i2c_client *i2c_client[AB3550_NUM_BANKS]; + char chip_name[32]; + u8 chip_id; + struct work_struct mask_work; + spinlock_t event_lock; + u8 event_mask[AB3550_NUM_EVENT_REG]; + u8 startup_events[AB3550_NUM_EVENT_REG]; + bool startup_events_read; +#ifdef CONFIG_DEBUG_FS + unsigned int debug_bank; + unsigned int debug_address; +#endif +}; + +/** + * struct ab3550_reg_range + * @first: the first address of the range + * @last: the last address of the range + * @perm: access permissions for the range + */ +struct ab3550_reg_range { + u8 first; + u8 last; + u8 perm; +}; + +/** + * struct ab3550_reg_ranges + * @count: the number of ranges in the list + * @range: the list of register ranges + */ +struct ab3550_reg_ranges { + u8 count; + const struct ab3550_reg_range *range; +}; + +/* + * Permissible register ranges for reading and writing per device and bank. + * + * The ranges must be listed in increasing address order, and no overlaps are + * allowed. It is assumed that write permission implies read permission + * (i.e. only RO and RW permissions should be used). Ranges with write + * permission must not be split up. 
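 * As an example, the charger device below declares {0x10..0x1a, RW} and
 * {0x21..0x21, RO} in its first bank: registers 0x10-0x1a may be read and
 * written through the exported accessors, while register 0x21 is read-only.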
+ */ + +#define NO_RANGE {.count = 0, .range = NULL,} + +static struct +ab3550_reg_ranges ab3550_reg_ranges[AB3550_NUM_DEVICES][AB3550_NUM_BANKS] = { + [AB3550_DEVID_DAC] = { + NO_RANGE, + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0xb0, + .last = 0xba, + .perm = AB3550_PERM_RW, + }, + { + .first = 0xbc, + .last = 0xc3, + .perm = AB3550_PERM_RW, + }, + }, + }, + }, + [AB3550_DEVID_LEDS] = { + NO_RANGE, + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x5a, + .last = 0x88, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x8a, + .last = 0xad, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_POWER] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_REGULATORS] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x69, + .last = 0xa3, + .perm = AB3550_PERM_RW, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x14, + .last = 0x16, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_SIM] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x14, + .last = 0x17, + .perm = AB3550_PERM_RW, + }, + } + + }, + }, + [AB3550_DEVID_UART] = { + NO_RANGE, + NO_RANGE, + }, + [AB3550_DEVID_RTC] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0c, + .perm = AB3550_PERM_RW, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_CHARGER] = { + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x10, + .last = 0x1a, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + NO_RANGE, + }, + [AB3550_DEVID_ADC] = { + NO_RANGE, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x20, + .last = 0x56, + .perm = AB3550_PERM_RW, + }, + + } + }, + }, + [AB3550_DEVID_FUELGAUGE] = { + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x21, + .last = 0x21, + .perm = AB3550_PERM_RO, + }, + } + }, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + .perm = AB3550_PERM_RW, + }, + } + }, + }, + [AB3550_DEVID_VIBRATOR] = { + NO_RANGE, + { + .count = 1, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x10, + .last = 0x13, + .perm = AB3550_PERM_RW, + }, + + } + }, + }, + [AB3550_DEVID_CODEC] = { + { + .count = 2, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x31, + .last = 0x63, + .perm = AB3550_PERM_RW, + }, + { + .first = 0x65, + .last = 0x68, + .perm = AB3550_PERM_RW, + }, + } + }, + NO_RANGE, + }, +}; + +static struct mfd_cell ab3550_devs[AB3550_NUM_DEVICES] = { + [AB3550_DEVID_DAC] = { + .name = "ab3550-dac", + .id = AB3550_DEVID_DAC, + .num_resources = 0, + }, + [AB3550_DEVID_LEDS] = { + .name = "ab3550-leds", + .id = AB3550_DEVID_LEDS, + }, + [AB3550_DEVID_POWER] = { + .name = "ab3550-power", + .id = AB3550_DEVID_POWER, + }, + [AB3550_DEVID_REGULATORS] = { + .name = "ab3550-regulators", + .id = AB3550_DEVID_REGULATORS, + }, + [AB3550_DEVID_SIM] = { + .name = "ab3550-sim", + .id = AB3550_DEVID_SIM, + }, + [AB3550_DEVID_UART] = { + .name = "ab3550-uart", + .id = AB3550_DEVID_UART, + }, + [AB3550_DEVID_RTC] = { + .name = "ab3550-rtc", + .id = AB3550_DEVID_RTC, + }, + [AB3550_DEVID_CHARGER] = { + 
.name = "ab3550-charger", + .id = AB3550_DEVID_CHARGER, + }, + [AB3550_DEVID_ADC] = { + .name = "ab3550-adc", + .id = AB3550_DEVID_ADC, + .num_resources = 10, + .resources = (struct resource[]) { + { + .name = "TRIGGER-0", + .flags = IORESOURCE_IRQ, + .start = 16, + .end = 16, + }, + { + .name = "TRIGGER-1", + .flags = IORESOURCE_IRQ, + .start = 17, + .end = 17, + }, + { + .name = "TRIGGER-2", + .flags = IORESOURCE_IRQ, + .start = 18, + .end = 18, + }, + { + .name = "TRIGGER-3", + .flags = IORESOURCE_IRQ, + .start = 19, + .end = 19, + }, + { + .name = "TRIGGER-4", + .flags = IORESOURCE_IRQ, + .start = 20, + .end = 20, + }, + { + .name = "TRIGGER-5", + .flags = IORESOURCE_IRQ, + .start = 21, + .end = 21, + }, + { + .name = "TRIGGER-6", + .flags = IORESOURCE_IRQ, + .start = 22, + .end = 22, + }, + { + .name = "TRIGGER-7", + .flags = IORESOURCE_IRQ, + .start = 23, + .end = 23, + }, + { + .name = "TRIGGER-VBAT-TXON", + .flags = IORESOURCE_IRQ, + .start = 13, + .end = 13, + }, + { + .name = "TRIGGER-VBAT", + .flags = IORESOURCE_IRQ, + .start = 12, + .end = 12, + }, + }, + }, + [AB3550_DEVID_FUELGAUGE] = { + .name = "ab3550-fuelgauge", + .id = AB3550_DEVID_FUELGAUGE, + }, + [AB3550_DEVID_VIBRATOR] = { + .name = "ab3550-vibrator", + .id = AB3550_DEVID_VIBRATOR, + }, + [AB3550_DEVID_CODEC] = { + .name = "ab3550-codec", + .id = AB3550_DEVID_CODEC, + }, +}; + +/* + * I2C transactions with error messages. + */ +static int ab3550_i2c_master_send(struct ab3550 *ab, u8 bank, u8 *data, + u8 count) +{ + int err; + + err = i2c_master_send(ab->i2c_client[bank], data, count); + if (err < 0) { + dev_err(&ab->i2c_client[0]->dev, "send error: %d\n", err); + return err; + } + return 0; +} + +static int ab3550_i2c_master_recv(struct ab3550 *ab, u8 bank, u8 *data, + u8 count) +{ + int err; + + err = i2c_master_recv(ab->i2c_client[bank], data, count); + if (err < 0) { + dev_err(&ab->i2c_client[0]->dev, "receive error: %d\n", err); + return err; + } + return 0; +} + +/* + * Functionality for getting/setting register values. + */ +static int get_register_interruptible(struct ab3550 *ab, u8 bank, u8 reg, + u8 *value) +{ + int err; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + err = ab3550_i2c_master_send(ab, bank, ®, 1); + if (!err) + err = ab3550_i2c_master_recv(ab, bank, value, 1); + + mutex_unlock(&ab->access_mutex); + return err; +} + +static int get_register_page_interruptible(struct ab3550 *ab, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + int err; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + err = ab3550_i2c_master_send(ab, bank, &first_reg, 1); + if (!err) + err = ab3550_i2c_master_recv(ab, bank, regvals, numregs); + + mutex_unlock(&ab->access_mutex); + return err; +} + +static int mask_and_set_register_interruptible(struct ab3550 *ab, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + int err = 0; + + if (likely(bitmask)) { + u8 reg_bits[2] = {reg, 0}; + + err = mutex_lock_interruptible(&ab->access_mutex); + if (err) + return err; + + if (bitmask == 0xFF) /* No need to read in this case. */ + reg_bits[1] = bitvalues; + else { /* Read and modify the register value. */ + u8 bits; + + err = ab3550_i2c_master_send(ab, bank, ®, 1); + if (err) + goto unlock_and_return; + err = ab3550_i2c_master_recv(ab, bank, &bits, 1); + if (err) + goto unlock_and_return; + reg_bits[1] = ((~bitmask & bits) | + (bitmask & bitvalues)); + } + /* Write the new value. 
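 * reg_bits[] now holds the register address followed by the merged value,
 * so the single two-byte i2c write below applies the update while
 * access_mutex is still held.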
*/ + err = ab3550_i2c_master_send(ab, bank, reg_bits, 2); +unlock_and_return: + mutex_unlock(&ab->access_mutex); + } + return err; +} + +/* + * Read/write permission checking functions. + */ +static bool page_write_allowed(const struct ab3550_reg_ranges *ranges, + u8 first_reg, u8 last_reg) +{ + u8 i; + + if (last_reg < first_reg) + return false; + + for (i = 0; i < ranges->count; i++) { + if (first_reg < ranges->range[i].first) + break; + if ((last_reg <= ranges->range[i].last) && + (ranges->range[i].perm & AB3550_PERM_WR)) + return true; + } + return false; +} + +static bool reg_write_allowed(const struct ab3550_reg_ranges *ranges, u8 reg) +{ + return page_write_allowed(ranges, reg, reg); +} + +static bool page_read_allowed(const struct ab3550_reg_ranges *ranges, + u8 first_reg, u8 last_reg) +{ + u8 i; + + if (last_reg < first_reg) + return false; + /* Find the range (if it exists in the list) that includes first_reg. */ + for (i = 0; i < ranges->count; i++) { + if (first_reg < ranges->range[i].first) + return false; + if (first_reg <= ranges->range[i].last) + break; + } + /* Make sure that the entire range up to and including last_reg is + * readable. This may span several of the ranges in the list. + */ + while ((i < ranges->count) && + (ranges->range[i].perm & AB3550_PERM_RD)) { + if (last_reg <= ranges->range[i].last) + return true; + if ((++i >= ranges->count) || + (ranges->range[i].first != + (ranges->range[i - 1].last + 1))) { + break; + } + } + return false; +} + +static bool reg_read_allowed(const struct ab3550_reg_ranges *ranges, u8 reg) +{ + return page_read_allowed(ranges, reg, reg); +} + +/* + * The exported register access functionality. + */ +int ab3550_get_chip_id(struct device *dev) +{ + struct ab3550 *ab = dev_get_drvdata(dev->parent); + return (int)ab->chip_id; +} + +int ab3550_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !reg_write_allowed(&ab3550_reg_ranges[pdev->id][bank], reg)) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return mask_and_set_register_interruptible(ab, bank, reg, + bitmask, bitvalues); +} + +int ab3550_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value) +{ + return ab3550_mask_and_set_register_interruptible(dev, bank, reg, 0xFF, + value); +} + +int ab3550_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !reg_read_allowed(&ab3550_reg_ranges[pdev->id][bank], reg)) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return get_register_interruptible(ab, bank, reg, value); +} + +int ab3550_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + struct ab3550 *ab; + struct platform_device *pdev = to_platform_device(dev); + + if ((AB3550_NUM_BANKS <= bank) || + !page_read_allowed(&ab3550_reg_ranges[pdev->id][bank], + first_reg, (first_reg + numregs - 1))) + return -EINVAL; + + ab = dev_get_drvdata(dev->parent); + return get_register_page_interruptible(ab, bank, first_reg, regvals, + numregs); +} + +int ab3550_event_registers_startup_state_get(struct device *dev, u8 *event) +{ + struct ab3550 *ab; + + ab = dev_get_drvdata(dev->parent); + if (!ab->startup_events_read) + return -EAGAIN; /* Try again later */ + + memcpy(event, 
ab->startup_events, AB3550_NUM_EVENT_REG); + return 0; +} + +int ab3550_startup_irq_enabled(struct device *dev, unsigned int irq) +{ + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + bool val; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + val = ((ab->startup_events[irq / 8] & BIT(irq % 8)) != 0); + + return val; +} + +static struct abx500_ops ab3550_ops = { + .get_chip_id = ab3550_get_chip_id, + .get_register = ab3550_get_register_interruptible, + .set_register = ab3550_set_register_interruptible, + .get_register_page = ab3550_get_register_page_interruptible, + .set_register_page = NULL, + .mask_and_set_register = ab3550_mask_and_set_register_interruptible, + .event_registers_startup_state_get = + ab3550_event_registers_startup_state_get, + .startup_irq_enabled = ab3550_startup_irq_enabled, +}; + +static irqreturn_t ab3550_irq_handler(int irq, void *data) +{ + struct ab3550 *ab = data; + int err; + unsigned int i; + u8 e[AB3550_NUM_EVENT_REG]; + u8 *events; + unsigned long flags; + + events = (ab->startup_events_read ? e : ab->startup_events); + + err = get_register_page_interruptible(ab, AB3550_EVENT_BANK, + AB3550_EVENT_REG, events, AB3550_NUM_EVENT_REG); + if (err) + goto err_event_rd; + + if (!ab->startup_events_read) { + dev_info(&ab->i2c_client[0]->dev, + "startup events 0x%x,0x%x,0x%x,0x%x,0x%x\n", + ab->startup_events[0], ab->startup_events[1], + ab->startup_events[2], ab->startup_events[3], + ab->startup_events[4]); + ab->startup_events_read = true; + goto out; + } + + /* The two highest bits in event[4] are not used. */ + events[4] &= 0x3f; + + spin_lock_irqsave(&ab->event_lock, flags); + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) + events[i] &= ~ab->event_mask[i]; + spin_unlock_irqrestore(&ab->event_lock, flags); + + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) { + u8 bit; + u8 event_reg; + + dev_dbg(&ab->i2c_client[0]->dev, "IRQ Event[%d]: 0x%2x\n", + i, events[i]); + + event_reg = events[i]; + for (bit = 0; event_reg; bit++, event_reg /= 2) { + if (event_reg % 2) { + unsigned int irq; + struct ab3550_platform_data *plf_data; + + plf_data = ab->i2c_client[0]->dev.platform_data; + irq = plf_data->irq.base + (i * 8) + bit; + handle_nested_irq(irq); + } + } + } +out: + return IRQ_HANDLED; + +err_event_rd: + dev_dbg(&ab->i2c_client[0]->dev, "error reading event registers\n"); + return IRQ_HANDLED; +} + +#ifdef CONFIG_DEBUG_FS +static struct ab3550_reg_ranges debug_ranges[AB3550_NUM_BANKS] = { + { + .count = 6, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + }, + { + .first = 0x10, + .last = 0x1a, + }, + { + .first = 0x1e, + .last = 0x4f, + }, + { + .first = 0x51, + .last = 0x63, + }, + { + .first = 0x65, + .last = 0xa3, + }, + { + .first = 0xa5, + .last = 0xa8, + }, + } + }, + { + .count = 8, + .range = (struct ab3550_reg_range[]) { + { + .first = 0x00, + .last = 0x0e, + }, + { + .first = 0x10, + .last = 0x17, + }, + { + .first = 0x1a, + .last = 0x1c, + }, + { + .first = 0x20, + .last = 0x56, + }, + { + .first = 0x5a, + .last = 0x88, + }, + { + .first = 0x8a, + .last = 0xad, + }, + { + .first = 0xb0, + .last = 0xba, + }, + { + .first = 0xbc, + .last = 0xc3, + }, + } + }, +}; + +static int ab3550_registers_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + int bank; + + seq_printf(s, AB3550_NAME_STRING " register values:\n"); + + for (bank = 0; bank < AB3550_NUM_BANKS; bank++) { + unsigned int i; + + seq_printf(s, " bank %d:\n", bank); + for (i = 0; 
i < debug_ranges[bank].count; i++) { + u8 reg; + + for (reg = debug_ranges[bank].range[i].first; + reg <= debug_ranges[bank].range[i].last; + reg++) { + u8 value; + + get_register_interruptible(ab, bank, reg, + &value); + seq_printf(s, " [%d/0x%02X]: 0x%02X\n", bank, + reg, value); + } + } + } + return 0; +} + +static int ab3550_registers_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_registers_print, inode->i_private); +} + +static const struct file_operations ab3550_registers_fops = { + .open = ab3550_registers_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int ab3550_bank_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + + seq_printf(s, "%d\n", ab->debug_bank); + return 0; +} + +static int ab3550_bank_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_bank_print, inode->i_private); +} + +static ssize_t ab3550_bank_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_bank; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_bank); + if (err) + return -EINVAL; + + if (user_bank >= AB3550_NUM_BANKS) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > number of banks\n"); + return -EINVAL; + } + + ab->debug_bank = user_bank; + + return buf_size; +} + +static int ab3550_address_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + + seq_printf(s, "0x%02X\n", ab->debug_address); + return 0; +} + +static int ab3550_address_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_address_print, inode->i_private); +} + +static ssize_t ab3550_address_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_address; + int err; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_address); + if (err) + return -EINVAL; + if (user_address > 0xff) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > 0xff\n"); + return -EINVAL; + } + ab->debug_address = user_address; + return buf_size; +} + +static int ab3550_val_print(struct seq_file *s, void *p) +{ + struct ab3550 *ab = s->private; + int err; + u8 regvalue; + + err = get_register_interruptible(ab, (u8)ab->debug_bank, + (u8)ab->debug_address, ®value); + if (err) + return -EINVAL; + seq_printf(s, "0x%02X\n", regvalue); + + return 0; +} + +static int ab3550_val_open(struct inode *inode, struct file *file) +{ + return single_open(file, ab3550_val_print, inode->i_private); +} + +static ssize_t ab3550_val_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ab3550 *ab = ((struct seq_file *)(file->private_data))->private; + char buf[32]; + int buf_size; + unsigned long user_val; + int err; + u8 regvalue; + + /* Get userspace string and assure termination */ + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, 
buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + err = strict_strtoul(buf, 0, &user_val); + if (err) + return -EINVAL; + if (user_val > 0xff) { + dev_err(&ab->i2c_client[0]->dev, + "debugfs error input > 0xff\n"); + return -EINVAL; + } + err = mask_and_set_register_interruptible( + ab, (u8)ab->debug_bank, + (u8)ab->debug_address, 0xFF, (u8)user_val); + if (err) + return -EINVAL; + + get_register_interruptible(ab, (u8)ab->debug_bank, + (u8)ab->debug_address, ®value); + if (err) + return -EINVAL; + + return buf_size; +} + +static const struct file_operations ab3550_bank_fops = { + .open = ab3550_bank_open, + .write = ab3550_bank_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab3550_address_fops = { + .open = ab3550_address_open, + .write = ab3550_address_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct file_operations ab3550_val_fops = { + .open = ab3550_val_open, + .write = ab3550_val_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static struct dentry *ab3550_dir; +static struct dentry *ab3550_reg_file; +static struct dentry *ab3550_bank_file; +static struct dentry *ab3550_address_file; +static struct dentry *ab3550_val_file; + +static inline void ab3550_setup_debugfs(struct ab3550 *ab) +{ + ab->debug_bank = 0; + ab->debug_address = 0x00; + + ab3550_dir = debugfs_create_dir(AB3550_NAME_STRING, NULL); + if (!ab3550_dir) + goto exit_no_debugfs; + + ab3550_reg_file = debugfs_create_file("all-registers", + S_IRUGO, ab3550_dir, ab, &ab3550_registers_fops); + if (!ab3550_reg_file) + goto exit_destroy_dir; + + ab3550_bank_file = debugfs_create_file("register-bank", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_bank_fops); + if (!ab3550_bank_file) + goto exit_destroy_reg; + + ab3550_address_file = debugfs_create_file("register-address", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_address_fops); + if (!ab3550_address_file) + goto exit_destroy_bank; + + ab3550_val_file = debugfs_create_file("register-value", + (S_IRUGO | S_IWUGO), ab3550_dir, ab, &ab3550_val_fops); + if (!ab3550_val_file) + goto exit_destroy_address; + + return; + +exit_destroy_address: + debugfs_remove(ab3550_address_file); +exit_destroy_bank: + debugfs_remove(ab3550_bank_file); +exit_destroy_reg: + debugfs_remove(ab3550_reg_file); +exit_destroy_dir: + debugfs_remove(ab3550_dir); +exit_no_debugfs: + dev_err(&ab->i2c_client[0]->dev, "failed to create debugfs entries.\n"); + return; +} + +static inline void ab3550_remove_debugfs(void) +{ + debugfs_remove(ab3550_val_file); + debugfs_remove(ab3550_address_file); + debugfs_remove(ab3550_bank_file); + debugfs_remove(ab3550_reg_file); + debugfs_remove(ab3550_dir); +} + +#else /* !CONFIG_DEBUG_FS */ +static inline void ab3550_setup_debugfs(struct ab3550 *ab) +{ +} +static inline void ab3550_remove_debugfs(void) +{ +} +#endif + +/* + * Basic set-up, datastructure creation/destruction and I2C interface. + * This sets up a default config in the AB3550 chip so that it + * will work as expected. 
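 * ab3550_setup() walks the platform-supplied init_settings table and writes
 * each (bank, register, value) triple to the chip; writes that target the
 * interrupt mask registers AB3550_IMR1..AB3550_IMR5 are also mirrored into
 * the local event_mask cache so later mask/unmask operations start from the
 * correct state.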
+ */ +static int __init ab3550_setup(struct ab3550 *ab) +{ + int err = 0; + int i; + struct ab3550_platform_data *plf_data; + struct abx500_init_settings *settings; + + plf_data = ab->i2c_client[0]->dev.platform_data; + settings = plf_data->init_settings; + + for (i = 0; i < plf_data->init_settings_sz; i++) { + err = mask_and_set_register_interruptible(ab, + settings[i].bank, + settings[i].reg, + 0xFF, settings[i].setting); + if (err) + goto exit_no_setup; + + /* If event mask register update the event mask in ab3550 */ + if ((settings[i].bank == 0) && + (AB3550_IMR1 <= settings[i].reg) && + (settings[i].reg <= AB3550_IMR5)) { + ab->event_mask[settings[i].reg - AB3550_IMR1] = + settings[i].setting; + } + } +exit_no_setup: + return err; +} + +static void ab3550_mask_work(struct work_struct *work) +{ + struct ab3550 *ab = container_of(work, struct ab3550, mask_work); + int i; + unsigned long flags; + u8 mask[AB3550_NUM_EVENT_REG]; + + spin_lock_irqsave(&ab->event_lock, flags); + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) + mask[i] = ab->event_mask[i]; + spin_unlock_irqrestore(&ab->event_lock, flags); + + for (i = 0; i < AB3550_NUM_EVENT_REG; i++) { + int err; + + err = mask_and_set_register_interruptible(ab, 0, + (AB3550_IMR1 + i), ~0, mask[i]); + if (err) + dev_err(&ab->i2c_client[0]->dev, + "ab3550_mask_work failed 0x%x,0x%x\n", + (AB3550_IMR1 + i), mask[i]); + } +} + +static void ab3550_mask(unsigned int irq) +{ + unsigned long flags; + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + + spin_lock_irqsave(&ab->event_lock, flags); + ab->event_mask[irq / 8] |= BIT(irq % 8); + spin_unlock_irqrestore(&ab->event_lock, flags); + + schedule_work(&ab->mask_work); +} + +static void ab3550_unmask(unsigned int irq) +{ + unsigned long flags; + struct ab3550 *ab; + struct ab3550_platform_data *plf_data; + + ab = get_irq_chip_data(irq); + plf_data = ab->i2c_client[0]->dev.platform_data; + irq -= plf_data->irq.base; + + spin_lock_irqsave(&ab->event_lock, flags); + ab->event_mask[irq / 8] &= ~BIT(irq % 8); + spin_unlock_irqrestore(&ab->event_lock, flags); + + schedule_work(&ab->mask_work); +} + +static void noop(unsigned int irq) +{ +} + +static struct irq_chip ab3550_irq_chip = { + .name = "ab3550-core", /* Keep the same name as the request */ + .startup = NULL, /* defaults to enable */ + .shutdown = NULL, /* defaults to disable */ + .enable = NULL, /* defaults to unmask */ + .disable = ab3550_mask, /* No default to mask in chip.c */ + .ack = noop, + .mask = ab3550_mask, + .unmask = ab3550_unmask, + .end = NULL, +}; + +struct ab_family_id { + u8 id; + char *name; +}; + +static const struct ab_family_id ids[] __initdata = { + /* AB3550 */ + { + .id = AB3550_P1A, + .name = "P1A" + }, + /* Terminator */ + { + .id = 0x00, + } +}; + +static int __init ab3550_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ab3550 *ab; + struct ab3550_platform_data *ab3550_plf_data = + client->dev.platform_data; + int err; + int i; + int num_i2c_clients = 0; + + ab = kzalloc(sizeof(struct ab3550), GFP_KERNEL); + if (!ab) { + dev_err(&client->dev, + "could not allocate " AB3550_NAME_STRING " device\n"); + return -ENOMEM; + } + + /* Initialize data structure */ + mutex_init(&ab->access_mutex); + spin_lock_init(&ab->event_lock); + ab->i2c_client[0] = client; + + i2c_set_clientdata(client, ab); + + /* Read chip ID register */ + err = get_register_interruptible(ab, 0, 
AB3550_CID_REG, &ab->chip_id); + if (err) { + dev_err(&client->dev, "could not communicate with the analog " + "baseband chip\n"); + goto exit_no_detect; + } + + for (i = 0; ids[i].id != 0x0; i++) { + if (ids[i].id == ab->chip_id) { + snprintf(&ab->chip_name[0], sizeof(ab->chip_name) - 1, + AB3550_ID_FORMAT_STRING, ids[i].name); + break; + } + } + + if (ids[i].id == 0x0) { + dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n", + ab->chip_id); + dev_err(&client->dev, "driver not started!\n"); + goto exit_no_detect; + } + + dev_info(&client->dev, "detected AB chip: %s\n", &ab->chip_name[0]); + + /* Attach other dummy I2C clients. */ + while (++num_i2c_clients < AB3550_NUM_BANKS) { + ab->i2c_client[num_i2c_clients] = + i2c_new_dummy(client->adapter, + (client->addr + num_i2c_clients)); + if (!ab->i2c_client[num_i2c_clients]) { + err = -ENOMEM; + goto exit_no_dummy_client; + } + strlcpy(ab->i2c_client[num_i2c_clients]->name, id->name, + sizeof(ab->i2c_client[num_i2c_clients]->name)); + } + + err = ab3550_setup(ab); + if (err) + goto exit_no_setup; + + INIT_WORK(&ab->mask_work, ab3550_mask_work); + + for (i = 0; i < ab3550_plf_data->irq.count; i++) { + unsigned int irq; + + irq = ab3550_plf_data->irq.base + i; + set_irq_chip_data(irq, ab); + set_irq_chip_and_handler(irq, &ab3550_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler, + IRQF_ONESHOT, "ab3550-core", ab); + /* This real unpredictable IRQ is of course sampled for entropy */ + rand_initialize_irq(client->irq); + + if (err) + goto exit_no_irq; + + err = abx500_register_ops(&client->dev, &ab3550_ops); + if (err) + goto exit_no_ops; + + /* Set up and register the platform devices. */ + for (i = 0; i < AB3550_NUM_DEVICES; i++) { + ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i]; + ab3550_devs[i].data_size = ab3550_plf_data->dev_data_sz[i]; + } + + err = mfd_add_devices(&client->dev, 0, ab3550_devs, + ARRAY_SIZE(ab3550_devs), NULL, + ab3550_plf_data->irq.base); + + ab3550_setup_debugfs(ab); + + return 0; + +exit_no_ops: +exit_no_irq: +exit_no_setup: +exit_no_dummy_client: + /* Unregister the dummy i2c clients. 
*/ + while (--num_i2c_clients) + i2c_unregister_device(ab->i2c_client[num_i2c_clients]); +exit_no_detect: + kfree(ab); + return err; +} + +static int __exit ab3550_remove(struct i2c_client *client) +{ + struct ab3550 *ab = i2c_get_clientdata(client); + int num_i2c_clients = AB3550_NUM_BANKS; + + mfd_remove_devices(&client->dev); + ab3550_remove_debugfs(); + + while (--num_i2c_clients) + i2c_unregister_device(ab->i2c_client[num_i2c_clients]); + + /* + * At this point, all subscribers should have unregistered + * their notifiers so deactivate IRQ + */ + free_irq(client->irq, ab); + i2c_set_clientdata(client, NULL); + kfree(ab); + return 0; +} + +static const struct i2c_device_id ab3550_id[] = { + {AB3550_NAME_STRING, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, ab3550_id); + +static struct i2c_driver ab3550_driver = { + .driver = { + .name = AB3550_NAME_STRING, + .owner = THIS_MODULE, + }, + .id_table = ab3550_id, + .probe = ab3550_probe, + .remove = __exit_p(ab3550_remove), +}; + +static int __init ab3550_i2c_init(void) +{ + return i2c_add_driver(&ab3550_driver); +} + +static void __exit ab3550_i2c_exit(void) +{ + i2c_del_driver(&ab3550_driver); +} + +subsys_initcall(ab3550_i2c_init); +module_exit(ab3550_i2c_exit); + +MODULE_AUTHOR("Mattias Wallin "); +MODULE_DESCRIPTION("AB3550 core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/ab4500-core.c b/drivers/mfd/ab4500-core.c deleted file mode 100644 index c275daa3ab1a..000000000000 --- a/drivers/mfd/ab4500-core.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (C) 2009 ST-Ericsson - * - * Author: Srinidhi KASAGAR - * - * This program is free software; you can redistribute it - * and/or modify it under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation. - * - * AB4500 is a companion power management chip used with U8500. - * On this platform, this is interfaced with SSP0 controller - * which is a ARM primecell pl022. - * - * At the moment the module just exports read/write features. - * Interrupt management to be added - TODO. 
- */ -#include -#include -#include -#include -#include -#include -#include - -/* just required if probe fails, we need to - * unregister the device - */ -static struct spi_driver ab4500_driver; - -/* - * This funtion writes to any AB4500 registers using - * SPI protocol & before it writes it packs the data - * in the below 24 bit frame format - * - * *|------------------------------------| - * *| 23|22...18|17.......10|9|8|7......0| - * *| r/w bank adr data | - * * ------------------------------------ - * - * This function shouldn't be called from interrupt - * context - */ -int ab4500_write(struct ab4500 *ab4500, unsigned char block, - unsigned long addr, unsigned char data) -{ - struct spi_transfer xfer; - struct spi_message msg; - int err; - unsigned long spi_data = - block << 18 | addr << 10 | data; - - mutex_lock(&ab4500->lock); - ab4500->tx_buf[0] = spi_data; - ab4500->rx_buf[0] = 0; - - xfer.tx_buf = ab4500->tx_buf; - xfer.rx_buf = NULL; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - err = spi_sync(ab4500->spi, &msg); - mutex_unlock(&ab4500->lock); - - return err; -} -EXPORT_SYMBOL(ab4500_write); - -int ab4500_read(struct ab4500 *ab4500, unsigned char block, - unsigned long addr) -{ - struct spi_transfer xfer; - struct spi_message msg; - unsigned long spi_data = - 1 << 23 | block << 18 | addr << 10; - - mutex_lock(&ab4500->lock); - ab4500->tx_buf[0] = spi_data; - ab4500->rx_buf[0] = 0; - - xfer.tx_buf = ab4500->tx_buf; - xfer.rx_buf = ab4500->rx_buf; - xfer.len = sizeof(unsigned long); - - spi_message_init(&msg); - spi_message_add_tail(&xfer, &msg); - - spi_sync(ab4500->spi, &msg); - mutex_unlock(&ab4500->lock); - - return ab4500->rx_buf[0]; -} -EXPORT_SYMBOL(ab4500_read); - -/* ref: ab3100 core */ -#define AB4500_DEVICE(devname, devid) \ -static struct platform_device ab4500_##devname##_device = { \ - .name = devid, \ - .id = -1, \ -} - -/* list of childern devices of ab4500 - all are - * not populated here - TODO - */ -AB4500_DEVICE(charger, "ab4500-charger"); -AB4500_DEVICE(audio, "ab4500-audio"); -AB4500_DEVICE(usb, "ab4500-usb"); -AB4500_DEVICE(tvout, "ab4500-tvout"); -AB4500_DEVICE(sim, "ab4500-sim"); -AB4500_DEVICE(gpadc, "ab4500-gpadc"); -AB4500_DEVICE(clkmgt, "ab4500-clkmgt"); -AB4500_DEVICE(misc, "ab4500-misc"); - -static struct platform_device *ab4500_platform_devs[] = { - &ab4500_charger_device, - &ab4500_audio_device, - &ab4500_usb_device, - &ab4500_tvout_device, - &ab4500_sim_device, - &ab4500_gpadc_device, - &ab4500_clkmgt_device, - &ab4500_misc_device, -}; - -static int __init ab4500_probe(struct spi_device *spi) -{ - struct ab4500 *ab4500; - unsigned char revision; - int err = 0; - int i; - - ab4500 = kzalloc(sizeof *ab4500, GFP_KERNEL); - if (!ab4500) { - dev_err(&spi->dev, "could not allocate AB4500\n"); - err = -ENOMEM; - goto not_detect; - } - - ab4500->spi = spi; - spi_set_drvdata(spi, ab4500); - - mutex_init(&ab4500->lock); - - /* read the revision register */ - revision = ab4500_read(ab4500, AB4500_MISC, AB4500_REV_REG); - - /* revision id 0x0 is for early drop, 0x10 is for cut1.0 */ - if (revision == 0x0 || revision == 0x10) - dev_info(&spi->dev, "Detected chip: %s, revision = %x\n", - ab4500_driver.driver.name, revision); - else { - dev_err(&spi->dev, "unknown chip: 0x%x\n", revision); - goto not_detect; - } - - for (i = 0; i < ARRAY_SIZE(ab4500_platform_devs); i++) { - ab4500_platform_devs[i]->dev.parent = - &spi->dev; - platform_set_drvdata(ab4500_platform_devs[i], ab4500); - } - - /* register the 
ab4500 platform devices */ - platform_add_devices(ab4500_platform_devs, - ARRAY_SIZE(ab4500_platform_devs)); - - return err; - - not_detect: - spi_unregister_driver(&ab4500_driver); - kfree(ab4500); - return err; -} - -static int __devexit ab4500_remove(struct spi_device *spi) -{ - struct ab4500 *ab4500 = - spi_get_drvdata(spi); - - kfree(ab4500); - - return 0; -} - -static struct spi_driver ab4500_driver = { - .driver = { - .name = "ab4500", - .owner = THIS_MODULE, - }, - .probe = ab4500_probe, - .remove = __devexit_p(ab4500_remove) -}; - -static int __devinit ab4500_init(void) -{ - return spi_register_driver(&ab4500_driver); -} - -static void __exit ab4500_exit(void) -{ - spi_unregister_driver(&ab4500_driver); -} - -subsys_initcall(ab4500_init); -module_exit(ab4500_exit); - -MODULE_AUTHOR("Srinidhi KASAGAR + * Author: Rabin Vincent + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Interrupt register offsets + * Bank : 0x0E + */ +#define AB8500_IT_SOURCE1_REG 0x0E00 +#define AB8500_IT_SOURCE2_REG 0x0E01 +#define AB8500_IT_SOURCE3_REG 0x0E02 +#define AB8500_IT_SOURCE4_REG 0x0E03 +#define AB8500_IT_SOURCE5_REG 0x0E04 +#define AB8500_IT_SOURCE6_REG 0x0E05 +#define AB8500_IT_SOURCE7_REG 0x0E06 +#define AB8500_IT_SOURCE8_REG 0x0E07 +#define AB8500_IT_SOURCE19_REG 0x0E12 +#define AB8500_IT_SOURCE20_REG 0x0E13 +#define AB8500_IT_SOURCE21_REG 0x0E14 +#define AB8500_IT_SOURCE22_REG 0x0E15 +#define AB8500_IT_SOURCE23_REG 0x0E16 +#define AB8500_IT_SOURCE24_REG 0x0E17 + +/* + * latch registers + */ +#define AB8500_IT_LATCH1_REG 0x0E20 +#define AB8500_IT_LATCH2_REG 0x0E21 +#define AB8500_IT_LATCH3_REG 0x0E22 +#define AB8500_IT_LATCH4_REG 0x0E23 +#define AB8500_IT_LATCH5_REG 0x0E24 +#define AB8500_IT_LATCH6_REG 0x0E25 +#define AB8500_IT_LATCH7_REG 0x0E26 +#define AB8500_IT_LATCH8_REG 0x0E27 +#define AB8500_IT_LATCH9_REG 0x0E28 +#define AB8500_IT_LATCH10_REG 0x0E29 +#define AB8500_IT_LATCH19_REG 0x0E32 +#define AB8500_IT_LATCH20_REG 0x0E33 +#define AB8500_IT_LATCH21_REG 0x0E34 +#define AB8500_IT_LATCH22_REG 0x0E35 +#define AB8500_IT_LATCH23_REG 0x0E36 +#define AB8500_IT_LATCH24_REG 0x0E37 + +/* + * mask registers + */ + +#define AB8500_IT_MASK1_REG 0x0E40 +#define AB8500_IT_MASK2_REG 0x0E41 +#define AB8500_IT_MASK3_REG 0x0E42 +#define AB8500_IT_MASK4_REG 0x0E43 +#define AB8500_IT_MASK5_REG 0x0E44 +#define AB8500_IT_MASK6_REG 0x0E45 +#define AB8500_IT_MASK7_REG 0x0E46 +#define AB8500_IT_MASK8_REG 0x0E47 +#define AB8500_IT_MASK9_REG 0x0E48 +#define AB8500_IT_MASK10_REG 0x0E49 +#define AB8500_IT_MASK11_REG 0x0E4A +#define AB8500_IT_MASK12_REG 0x0E4B +#define AB8500_IT_MASK13_REG 0x0E4C +#define AB8500_IT_MASK14_REG 0x0E4D +#define AB8500_IT_MASK15_REG 0x0E4E +#define AB8500_IT_MASK16_REG 0x0E4F +#define AB8500_IT_MASK17_REG 0x0E50 +#define AB8500_IT_MASK18_REG 0x0E51 +#define AB8500_IT_MASK19_REG 0x0E52 +#define AB8500_IT_MASK20_REG 0x0E53 +#define AB8500_IT_MASK21_REG 0x0E54 +#define AB8500_IT_MASK22_REG 0x0E55 +#define AB8500_IT_MASK23_REG 0x0E56 +#define AB8500_IT_MASK24_REG 0x0E57 + +#define AB8500_REV_REG 0x1080 + +/* + * Map interrupt numbers to the LATCH and MASK register offsets, Interrupt + * numbers are indexed into this array with (num / 8). + * + * This is one off from the register names, i.e. AB8500_IT_MASK1_REG is at + * offset 0. 
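As a worked example of this indexing, using the offset table defined just below: interrupt line n is reported in AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[n / 8], bit n % 8. A minimal sketch (the helper name is illustrative only, not part of the driver):

	/*
	 * Illustration only: for line 42, 42 / 8 = 5 and
	 * ab8500_irq_regoffset[5] = 6, so the latch register is
	 * 0x0E20 + 6 = 0x0E26 (AB8500_IT_LATCH7_REG) and the bit is
	 * 42 % 8 = 2.  The corresponding mask register is found the
	 * same way from AB8500_IT_MASK1_REG.
	 */
	static inline u16 ab8500_line_to_latch(int line)
	{
		return AB8500_IT_LATCH1_REG + ab8500_irq_regoffset[line / 8];
	}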
+ */ +static const int ab8500_irq_regoffset[AB8500_NUM_IRQ_REGS] = { + 0, 1, 2, 3, 4, 6, 7, 8, 9, 18, 19, 20, 21, +}; + +static int __ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + int ret; + + dev_vdbg(ab8500->dev, "wr: addr %#x <= %#x\n", addr, data); + + ret = ab8500->write(ab8500, addr, data); + if (ret < 0) + dev_err(ab8500->dev, "failed to write reg %#x: %d\n", + addr, ret); + + return ret; +} + +/** + * ab8500_write() - write an AB8500 register + * @ab8500: device to write to + * @addr: address of the register + * @data: value to write + */ +int ab8500_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + int ret; + + mutex_lock(&ab8500->lock); + ret = __ab8500_write(ab8500, addr, data); + mutex_unlock(&ab8500->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_write); + +static int __ab8500_read(struct ab8500 *ab8500, u16 addr) +{ + int ret; + + ret = ab8500->read(ab8500, addr); + if (ret < 0) + dev_err(ab8500->dev, "failed to read reg %#x: %d\n", + addr, ret); + + dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); + + return ret; +} + +/** + * ab8500_read() - read an AB8500 register + * @ab8500: device to read from + * @addr: address of the register + */ +int ab8500_read(struct ab8500 *ab8500, u16 addr) +{ + int ret; + + mutex_lock(&ab8500->lock); + ret = __ab8500_read(ab8500, addr); + mutex_unlock(&ab8500->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_read); + +/** + * ab8500_set_bits() - set a bitfield in an AB8500 register + * @ab8500: device to read from + * @addr: address of the register + * @mask: mask of the bitfield to modify + * @data: value to set to the bitfield + */ +int ab8500_set_bits(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data) +{ + int ret; + + mutex_lock(&ab8500->lock); + + ret = __ab8500_read(ab8500, addr); + if (ret < 0) + goto out; + + ret &= ~mask; + ret |= data; + + ret = __ab8500_write(ab8500, addr, ret); + +out: + mutex_unlock(&ab8500->lock); + return ret; +} +EXPORT_SYMBOL_GPL(ab8500_set_bits); + +static void ab8500_irq_lock(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + + mutex_lock(&ab8500->irq_lock); +} + +static void ab8500_irq_sync_unlock(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int i; + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { + u8 old = ab8500->oldmask[i]; + u8 new = ab8500->mask[i]; + int reg; + + if (new == old) + continue; + + ab8500->oldmask[i] = new; + + reg = AB8500_IT_MASK1_REG + ab8500_irq_regoffset[i]; + ab8500_write(ab8500, reg, new); + } + + mutex_unlock(&ab8500->irq_lock); +} + +static void ab8500_irq_mask(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int offset = irq - ab8500->irq_base; + int index = offset / 8; + int mask = 1 << (offset % 8); + + ab8500->mask[index] |= mask; +} + +static void ab8500_irq_unmask(unsigned int irq) +{ + struct ab8500 *ab8500 = get_irq_chip_data(irq); + int offset = irq - ab8500->irq_base; + int index = offset / 8; + int mask = 1 << (offset % 8); + + ab8500->mask[index] &= ~mask; +} + +static struct irq_chip ab8500_irq_chip = { + .name = "ab8500", + .bus_lock = ab8500_irq_lock, + .bus_sync_unlock = ab8500_irq_sync_unlock, + .mask = ab8500_irq_mask, + .unmask = ab8500_irq_unmask, +}; + +static irqreturn_t ab8500_irq(int irq, void *dev) +{ + struct ab8500 *ab8500 = dev; + int i; + + dev_vdbg(ab8500->dev, "interrupt\n"); + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { + int regoffset = ab8500_irq_regoffset[i]; + int status; + + status = ab8500_read(ab8500, AB8500_IT_LATCH1_REG + 
regoffset); + if (status <= 0) + continue; + + do { + int bit = __ffs(status); + int line = i * 8 + bit; + + handle_nested_irq(ab8500->irq_base + line); + status &= ~(1 << bit); + } while (status); + } + + return IRQ_HANDLED; +} + +static int ab8500_irq_init(struct ab8500 *ab8500) +{ + int base = ab8500->irq_base; + int irq; + + for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { + set_irq_chip_data(irq, ab8500); + set_irq_chip_and_handler(irq, &ab8500_irq_chip, + handle_simple_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void ab8500_irq_remove(struct ab8500 *ab8500) +{ + int base = ab8500->irq_base; + int irq; + + for (irq = base; irq < base + AB8500_NR_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static struct resource ab8500_gpadc_resources[] = { + { + .name = "HW_CONV_END", + .start = AB8500_INT_GP_HW_ADC_CONV_END, + .end = AB8500_INT_GP_HW_ADC_CONV_END, + .flags = IORESOURCE_IRQ, + }, + { + .name = "SW_CONV_END", + .start = AB8500_INT_GP_SW_ADC_CONV_END, + .end = AB8500_INT_GP_SW_ADC_CONV_END, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource ab8500_rtc_resources[] = { + { + .name = "60S", + .start = AB8500_INT_RTC_60S, + .end = AB8500_INT_RTC_60S, + .flags = IORESOURCE_IRQ, + }, + { + .name = "ALARM", + .start = AB8500_INT_RTC_ALARM, + .end = AB8500_INT_RTC_ALARM, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell ab8500_devs[] = { + { + .name = "ab8500-gpadc", + .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), + .resources = ab8500_gpadc_resources, + }, + { + .name = "ab8500-rtc", + .num_resources = ARRAY_SIZE(ab8500_rtc_resources), + .resources = ab8500_rtc_resources, + }, + { .name = "ab8500-charger", }, + { .name = "ab8500-audio", }, + { .name = "ab8500-usb", }, + { .name = "ab8500-pwm", }, +}; + +int __devinit ab8500_init(struct ab8500 *ab8500) +{ + struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev); + int ret; + int i; + + if (plat) + ab8500->irq_base = plat->irq_base; + + mutex_init(&ab8500->lock); + mutex_init(&ab8500->irq_lock); + + ret = ab8500_read(ab8500, AB8500_REV_REG); + if (ret < 0) + return ret; + + /* + * 0x0 - Early Drop + * 0x10 - Cut 1.0 + * 0x11 - Cut 1.1 + */ + if (ret == 0x0 || ret == 0x10 || ret == 0x11) { + ab8500->revision = ret; + dev_info(ab8500->dev, "detected chip, revision: %#x\n", ret); + } else { + dev_err(ab8500->dev, "unknown chip, revision: %#x\n", ret); + return -EINVAL; + } + + if (plat && plat->init) + plat->init(ab8500); + + /* Clear and mask all interrupts */ + for (i = 0; i < 10; i++) { + ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i); + ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff); + } + + for (i = 18; i < 24; i++) { + ab8500_read(ab8500, AB8500_IT_LATCH1_REG + i); + ab8500_write(ab8500, AB8500_IT_MASK1_REG + i, 0xff); + } + + for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) + ab8500->mask[i] = ab8500->oldmask[i] = 0xff; + + if (ab8500->irq_base) { + ret = ab8500_irq_init(ab8500); + if (ret) + return ret; + + ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq, + IRQF_ONESHOT, "ab8500", ab8500); + if (ret) + goto out_removeirq; + } + + ret = mfd_add_devices(ab8500->dev, -1, ab8500_devs, + ARRAY_SIZE(ab8500_devs), NULL, + ab8500->irq_base); + if (ret) + goto out_freeirq; + + return ret; + +out_freeirq: + if (ab8500->irq_base) { + free_irq(ab8500->irq, ab8500); 
+out_removeirq: + ab8500_irq_remove(ab8500); + } + return ret; +} + +int __devexit ab8500_exit(struct ab8500 *ab8500) +{ + mfd_remove_devices(ab8500->dev); + if (ab8500->irq_base) { + free_irq(ab8500->irq, ab8500); + ab8500_irq_remove(ab8500); + } + + return 0; +} + +MODULE_AUTHOR("Srinidhi Kasagar, Rabin Vincent"); +MODULE_DESCRIPTION("AB8500 MFD core"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c new file mode 100644 index 000000000000..b81d4f768ef6 --- /dev/null +++ b/drivers/mfd/ab8500-spi.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * This funtion writes to any AB8500 registers using + * SPI protocol & before it writes it packs the data + * in the below 24 bit frame format + * + * *|------------------------------------| + * *| 23|22...18|17.......10|9|8|7......0| + * *| r/w bank adr data | + * * ------------------------------------ + * + * This function shouldn't be called from interrupt + * context + */ +static int ab8500_spi_write(struct ab8500 *ab8500, u16 addr, u8 data) +{ + struct spi_device *spi = container_of(ab8500->dev, struct spi_device, + dev); + unsigned long spi_data = addr << 10 | data; + struct spi_transfer xfer; + struct spi_message msg; + + ab8500->tx_buf[0] = spi_data; + ab8500->rx_buf[0] = 0; + + xfer.tx_buf = ab8500->tx_buf; + xfer.rx_buf = NULL; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(spi, &msg); +} + +static int ab8500_spi_read(struct ab8500 *ab8500, u16 addr) +{ + struct spi_device *spi = container_of(ab8500->dev, struct spi_device, + dev); + unsigned long spi_data = 1 << 23 | addr << 10; + struct spi_transfer xfer; + struct spi_message msg; + int ret; + + ab8500->tx_buf[0] = spi_data; + ab8500->rx_buf[0] = 0; + + xfer.tx_buf = ab8500->tx_buf; + xfer.rx_buf = ab8500->rx_buf; + xfer.len = sizeof(unsigned long); + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + ret = spi_sync(spi, &msg); + if (!ret) + ret = ab8500->rx_buf[0]; + + return ret; +} + +static int __devinit ab8500_spi_probe(struct spi_device *spi) +{ + struct ab8500 *ab8500; + int ret; + + ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL); + if (!ab8500) + return -ENOMEM; + + ab8500->dev = &spi->dev; + ab8500->irq = spi->irq; + + ab8500->read = ab8500_spi_read; + ab8500->write = ab8500_spi_write; + + spi_set_drvdata(spi, ab8500); + + ret = ab8500_init(ab8500); + if (ret) + kfree(ab8500); + + return ret; +} + +static int __devexit ab8500_spi_remove(struct spi_device *spi) +{ + struct ab8500 *ab8500 = spi_get_drvdata(spi); + + ab8500_exit(ab8500); + kfree(ab8500); + + return 0; +} + +static struct spi_driver ab8500_spi_driver = { + .driver = { + .name = "ab8500", + .owner = THIS_MODULE, + }, + .probe = ab8500_spi_probe, + .remove = __devexit_p(ab8500_spi_remove) +}; + +static int __init ab8500_spi_init(void) +{ + return spi_register_driver(&ab8500_spi_driver); +} +subsys_initcall(ab8500_spi_init); + +static void __exit ab8500_spi_exit(void) +{ + spi_unregister_driver(&ab8500_spi_driver); +} +module_exit(ab8500_spi_exit); + +MODULE_AUTHOR("Srinidhi KASAGAR + */ + +#include +#include +#include +#include + +static LIST_HEAD(abx500_list); + +struct abx500_device_entry { + struct list_head list; + struct abx500_ops ops; + struct device *dev; +}; + +static void lookup_ops(struct device 
*dev, struct abx500_ops **ops) +{ + struct abx500_device_entry *dev_entry; + + *ops = NULL; + list_for_each_entry(dev_entry, &abx500_list, list) { + if (dev_entry->dev == dev) { + *ops = &dev_entry->ops; + return; + } + } +} + +int abx500_register_ops(struct device *dev, struct abx500_ops *ops) +{ + struct abx500_device_entry *dev_entry; + + dev_entry = kzalloc(sizeof(struct abx500_device_entry), GFP_KERNEL); + if (IS_ERR(dev_entry)) { + dev_err(dev, "register_ops kzalloc failed"); + return -ENOMEM; + } + dev_entry->dev = dev; + memcpy(&dev_entry->ops, ops, sizeof(struct abx500_ops)); + + list_add_tail(&dev_entry->list, &abx500_list); + return 0; +} +EXPORT_SYMBOL(abx500_register_ops); + +void abx500_remove_ops(struct device *dev) +{ + struct abx500_device_entry *dev_entry, *tmp; + + list_for_each_entry_safe(dev_entry, tmp, &abx500_list, list) + { + if (dev_entry->dev == dev) { + list_del(&dev_entry->list); + kfree(dev_entry); + } + } +} +EXPORT_SYMBOL(abx500_remove_ops); + +int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->set_register != NULL)) + return ops->set_register(dev, bank, reg, value); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_set_register_interruptible); + +int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_register != NULL)) + return ops->get_register(dev, bank, reg, value); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_register_interruptible); + +int abx500_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_register_page != NULL)) + return ops->get_register_page(dev, bank, + first_reg, regvals, numregs); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_register_page_interruptible); + +int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->mask_and_set_register != NULL)) + return ops->mask_and_set_register(dev, bank, + reg, bitmask, bitvalues); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_mask_and_set_register_interruptible); + +int abx500_get_chip_id(struct device *dev) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->get_chip_id != NULL)) + return ops->get_chip_id(dev); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_get_chip_id); + +int abx500_event_registers_startup_state_get(struct device *dev, u8 *event) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->event_registers_startup_state_get != NULL)) + return ops->event_registers_startup_state_get(dev, event); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_event_registers_startup_state_get); + +int abx500_startup_irq_enabled(struct device *dev, unsigned int irq) +{ + struct abx500_ops *ops; + + lookup_ops(dev->parent, &ops); + if ((ops != NULL) && (ops->startup_irq_enabled != NULL)) + return ops->startup_irq_enabled(dev, irq); + else + return -ENOTSUPP; +} +EXPORT_SYMBOL(abx500_startup_irq_enabled); + +MODULE_AUTHOR("Mattias Wallin "); +MODULE_DESCRIPTION("ABX500 core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/da903x.c 
b/drivers/mfd/da903x.c index 67181b147ab3..3ad915d0589c 100644 --- a/drivers/mfd/da903x.c +++ b/drivers/mfd/da903x.c @@ -544,6 +544,7 @@ static int __devexit da903x_remove(struct i2c_client *client) struct da903x_chip *chip = i2c_get_clientdata(client); da903x_remove_subdevs(chip); + i2c_set_clientdata(client, NULL); kfree(chip); return 0; } diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c new file mode 100644 index 000000000000..9ed630799acc --- /dev/null +++ b/drivers/mfd/janz-cmodio.c @@ -0,0 +1,304 @@ +/* + * Janz CMOD-IO MODULbus Carrier Board PCI Driver + * + * Copyright (c) 2010 Ira W. Snyder + * + * Lots of inspiration and code was copied from drivers/mfd/sm501.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRV_NAME "janz-cmodio" + +/* Size of each MODULbus module in PCI BAR4 */ +#define CMODIO_MODULBUS_SIZE 0x200 + +/* Maximum number of MODULbus modules on a CMOD-IO carrier board */ +#define CMODIO_MAX_MODULES 4 + +/* Module Parameters */ +static unsigned int num_modules = CMODIO_MAX_MODULES; +static unsigned char *modules[CMODIO_MAX_MODULES] = { + "empty", "empty", "empty", "empty", +}; + +module_param_array(modules, charp, &num_modules, S_IRUGO); +MODULE_PARM_DESC(modules, "MODULbus modules attached to the carrier board"); + +/* Unique Device Id */ +static unsigned int cmodio_id; + +struct cmodio_device { + /* Parent PCI device */ + struct pci_dev *pdev; + + /* PLX control registers */ + struct janz_cmodio_onboard_regs __iomem *ctrl; + + /* hex switch position */ + u8 hex; + + /* mfd-core API */ + struct mfd_cell cells[CMODIO_MAX_MODULES]; + struct resource resources[3 * CMODIO_MAX_MODULES]; + struct janz_platform_data pdata[CMODIO_MAX_MODULES]; +}; + +/* + * Subdevices using the mfd-core API + */ + +static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv, + char *name, unsigned int devno, + unsigned int modno) +{ + struct janz_platform_data *pdata; + struct mfd_cell *cell; + struct resource *res; + struct pci_dev *pci; + + pci = priv->pdev; + cell = &priv->cells[devno]; + res = &priv->resources[devno * 3]; + pdata = &priv->pdata[devno]; + + cell->name = name; + cell->resources = res; + cell->num_resources = 3; + + /* Setup the subdevice ID -- must be unique */ + cell->id = cmodio_id++; + + /* Add platform data */ + pdata->modno = modno; + cell->platform_data = pdata; + cell->data_size = sizeof(*pdata); + + /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */ + res->flags = IORESOURCE_MEM; + res->parent = &pci->resource[3]; + res->start = pci->resource[3].start + (CMODIO_MODULBUS_SIZE * modno); + res->end = res->start + CMODIO_MODULBUS_SIZE - 1; + res++; + + /* PLX Control Registers -- PCI BAR4 is interrupt and other registers */ + res->flags = IORESOURCE_MEM; + res->parent = &pci->resource[4]; + res->start = pci->resource[4].start; + res->end = pci->resource[4].end; + res++; + + /* + * IRQ + * + * The start and end fields are used as an offset to the irq_base + * parameter passed into the mfd_add_devices() function call. All + * devices share the same IRQ. 
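Because each cell's IORESOURCE_IRQ entry is just an offset added to the irq_base handed to mfd_add_devices() (zero here, so every module ends up on the carrier's PCI interrupt), a submodule driver can pick the line up as an ordinary platform resource. A minimal, hypothetical sketch; the driver, handler and names below are illustrative and not part of this patch:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t example_modulbus_isr(int irq, void *dev_id)
	{
		/* acknowledge the module's own interrupt source here */
		return IRQ_HANDLED;
	}

	static int __devinit example_modulbus_probe(struct platform_device *pdev)
	{
		/* resolves to irq_base + 0, shared by all modules on the board */
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		return request_irq(irq, example_modulbus_isr, IRQF_SHARED,
				   dev_name(&pdev->dev), pdev);
	}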
+ */ + res->flags = IORESOURCE_IRQ; + res->parent = NULL; + res->start = 0; + res->end = 0; + res++; + + return 0; +} + +/* Probe each submodule using kernel parameters */ +static int __devinit cmodio_probe_submodules(struct cmodio_device *priv) +{ + struct pci_dev *pdev = priv->pdev; + unsigned int num_probed = 0; + char *name; + int i; + + for (i = 0; i < num_modules; i++) { + name = modules[i]; + if (!strcmp(name, "") || !strcmp(name, "empty")) + continue; + + dev_dbg(&priv->pdev->dev, "MODULbus %d: name %s\n", i, name); + cmodio_setup_subdevice(priv, name, num_probed, i); + num_probed++; + } + + /* print an error message if no modules were probed */ + if (num_probed == 0) { + dev_err(&priv->pdev->dev, "no MODULbus modules specified, " + "please set the ``modules'' kernel " + "parameter according to your " + "hardware configuration\n"); + return -ENODEV; + } + + return mfd_add_devices(&pdev->dev, 0, priv->cells, + num_probed, NULL, pdev->irq); +} + +/* + * SYSFS Attributes + */ + +static ssize_t mbus_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cmodio_device *priv = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%x\n", priv->hex); +} + +static DEVICE_ATTR(modulbus_number, S_IRUGO, mbus_show, NULL); + +static struct attribute *cmodio_sysfs_attrs[] = { + &dev_attr_modulbus_number.attr, + NULL, +}; + +static const struct attribute_group cmodio_sysfs_attr_group = { + .attrs = cmodio_sysfs_attrs, +}; + +/* + * PCI Driver + */ + +static int __devinit cmodio_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct cmodio_device *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&dev->dev, "unable to allocate private data\n"); + ret = -ENOMEM; + goto out_return; + } + + pci_set_drvdata(dev, priv); + priv->pdev = dev; + + /* Hardware Initialization */ + ret = pci_enable_device(dev); + if (ret) { + dev_err(&dev->dev, "unable to enable device\n"); + goto out_free_priv; + } + + pci_set_master(dev); + ret = pci_request_regions(dev, DRV_NAME); + if (ret) { + dev_err(&dev->dev, "unable to request regions\n"); + goto out_pci_disable_device; + } + + /* Onboard configuration registers */ + priv->ctrl = pci_ioremap_bar(dev, 4); + if (!priv->ctrl) { + dev_err(&dev->dev, "unable to remap onboard regs\n"); + ret = -ENOMEM; + goto out_pci_release_regions; + } + + /* Read the hex switch on the carrier board */ + priv->hex = ioread8(&priv->ctrl->int_enable); + + /* Add the MODULbus number (hex switch value) to the device's sysfs */ + ret = sysfs_create_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); + if (ret) { + dev_err(&dev->dev, "unable to create sysfs attributes\n"); + goto out_unmap_ctrl; + } + + /* + * Disable all interrupt lines, each submodule will enable its + * own interrupt line if needed + */ + iowrite8(0xf, &priv->ctrl->int_disable); + + /* Register drivers for all submodules */ + ret = cmodio_probe_submodules(priv); + if (ret) { + dev_err(&dev->dev, "unable to probe submodules\n"); + goto out_sysfs_remove_group; + } + + return 0; + +out_sysfs_remove_group: + sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); +out_unmap_ctrl: + iounmap(priv->ctrl); +out_pci_release_regions: + pci_release_regions(dev); +out_pci_disable_device: + pci_disable_device(dev); +out_free_priv: + kfree(priv); +out_return: + return ret; +} + +static void __devexit cmodio_pci_remove(struct pci_dev *dev) +{ + struct cmodio_device *priv = pci_get_drvdata(dev); + + mfd_remove_devices(&dev->dev); + 
sysfs_remove_group(&dev->dev.kobj, &cmodio_sysfs_attr_group); + iounmap(priv->ctrl); + pci_release_regions(dev); + pci_disable_device(dev); + kfree(priv); +} + +#define PCI_VENDOR_ID_JANZ 0x13c3 + +/* The list of devices that this module will support */ +static DEFINE_PCI_DEVICE_TABLE(cmodio_pci_ids) = { + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, cmodio_pci_ids); + +static struct pci_driver cmodio_pci_driver = { + .name = DRV_NAME, + .id_table = cmodio_pci_ids, + .probe = cmodio_pci_probe, + .remove = __devexit_p(cmodio_pci_remove), +}; + +/* + * Module Init / Exit + */ + +static int __init cmodio_init(void) +{ + return pci_register_driver(&cmodio_pci_driver); +} + +static void __exit cmodio_exit(void) +{ + pci_unregister_driver(&cmodio_pci_driver); +} + +MODULE_AUTHOR("Ira W. Snyder "); +MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver"); +MODULE_LICENSE("GPL"); + +module_init(cmodio_init); +module_exit(cmodio_exit); diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 85d63c04749b..f621bcea3d02 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -508,7 +508,7 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, max8925_reg_read(chip->i2c, MAX8925_ON_OFF_IRQ2); max8925_reg_read(chip->rtc, MAX8925_RTC_IRQ); max8925_reg_read(chip->adc, MAX8925_TSC_IRQ); - /* mask all interrupts */ + /* mask all interrupts except for TSC */ max8925_reg_write(chip->rtc, MAX8925_ALARM0_CNTL, 0); max8925_reg_write(chip->rtc, MAX8925_ALARM1_CNTL, 0); max8925_reg_write(chip->i2c, MAX8925_CHG_IRQ1_MASK, 0xff); @@ -516,7 +516,6 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff); max8925_reg_write(chip->i2c, MAX8925_ON_OFF_IRQ2_MASK, 0xff); max8925_reg_write(chip->rtc, MAX8925_RTC_IRQ_MASK, 0xff); - max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0xff); mutex_init(&chip->irq_lock); chip->core_irq = irq; @@ -547,7 +546,11 @@ static int max8925_irq_init(struct max8925_chip *chip, int irq, dev_err(chip->dev, "Failed to request core IRQ: %d\n", ret); chip->core_irq = 0; } + tsc_irq: + /* mask TSC interrupt */ + max8925_reg_write(chip->adc, MAX8925_TSC_IRQ_MASK, 0x0f); + if (!pdata->tsc_irq) { dev_warn(chip->dev, "No interrupt support on TSC IRQ\n"); return 0; diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c index d9fd8785da4d..e73f3f5252a8 100644 --- a/drivers/mfd/max8925-i2c.c +++ b/drivers/mfd/max8925-i2c.c @@ -173,8 +173,6 @@ static int __devexit max8925_remove(struct i2c_client *client) max8925_device_exit(chip); i2c_unregister_device(chip->adc); i2c_unregister_device(chip->rtc); - i2c_set_clientdata(chip->adc, NULL); - i2c_set_clientdata(chip->rtc, NULL); i2c_set_clientdata(chip->i2c, NULL); kfree(chip); return 0; diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c index 1f68ecadddc2..fecf38a4f025 100644 --- a/drivers/mfd/mc13783-core.c +++ b/drivers/mfd/mc13783-core.c @@ -679,6 +679,10 @@ err_revision: if (pdata->flags & MC13783_USE_TOUCHSCREEN) mc13783_add_subdevice(mc13783, "mc13783-ts"); + if (pdata->flags & MC13783_USE_LED) + mc13783_add_subdevice_pdata(mc13783, "mc13783-led", + pdata->leds, sizeof(*pdata->leds)); + return 0; } diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c index a94b131a18ef..721948be12c7 100644 --- a/drivers/mfd/menelaus.c +++ 
b/drivers/mfd/menelaus.c @@ -1228,6 +1228,7 @@ fail2: free_irq(client->irq, menelaus); flush_scheduled_work(); fail1: + i2c_set_clientdata(client, NULL); kfree(menelaus); return err; } @@ -1237,8 +1238,8 @@ static int __exit menelaus_remove(struct i2c_client *client) struct menelaus_chip *menelaus = i2c_get_clientdata(client); free_irq(client->irq, menelaus); - kfree(menelaus); i2c_set_clientdata(client, NULL); + kfree(menelaus); the_menelaus = NULL; return 0; } diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 8ffbb7a85a7e..7dd76bceaae8 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -48,7 +48,7 @@ static int mfd_add_device(struct device *parent, int id, res[r].flags = cell->resources[r].flags; /* Find out base to use */ - if (cell->resources[r].flags & IORESOURCE_MEM) { + if ((cell->resources[r].flags & IORESOURCE_MEM) && mem_base) { res[r].parent = mem_base; res[r].start = mem_base->start + cell->resources[r].start; diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c index fe8f922f6654..aed0d2a9b032 100644 --- a/drivers/mfd/pcf50633-adc.c +++ b/drivers/mfd/pcf50633-adc.c @@ -30,13 +30,13 @@ struct pcf50633_adc_request { int mux; int avg; - int result; void (*callback)(struct pcf50633 *, void *, int); void *callback_param; +}; - /* Used in case of sync requests */ +struct pcf50633_adc_sync_request { + int result; struct completion completion; - }; #define PCF50633_MAX_ADC_FIFO_DEPTH 8 @@ -109,10 +109,10 @@ adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req) return 0; } -static void -pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) +static void pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, + int result) { - struct pcf50633_adc_request *req = param; + struct pcf50633_adc_sync_request *req = param; req->result = result; complete(&req->completion); @@ -120,28 +120,19 @@ pcf50633_adc_sync_read_callback(struct pcf50633 *pcf, void *param, int result) int pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg) { - struct pcf50633_adc_request *req; - int err; + struct pcf50633_adc_sync_request req; + int ret; - /* req is freed when the result is ready, in interrupt handler */ - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) - return -ENOMEM; - - req->mux = mux; - req->avg = avg; - req->callback = pcf50633_adc_sync_read_callback; - req->callback_param = req; + init_completion(&req.completion); - init_completion(&req->completion); - err = adc_enqueue_request(pcf, req); - if (err) - return err; + ret = pcf50633_adc_async_read(pcf, mux, avg, + pcf50633_adc_sync_read_callback, &req); + if (ret) + return ret; - wait_for_completion(&req->completion); + wait_for_completion(&req.completion); - /* FIXME by this time req might be already freed */ - return req->result; + return req.result; } EXPORT_SYMBOL_GPL(pcf50633_adc_sync_read); diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 63a614d696c1..704736e6e9b9 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -21,16 +21,16 @@ #include #include #include -#include #include #include -/* Two MBCS registers used during cold start */ -#define PCF50633_REG_MBCS1 0x4b -#define PCF50633_REG_MBCS2 0x4c -#define PCF50633_MBCS1_USBPRES 0x01 -#define PCF50633_MBCS1_ADAPTPRES 0x01 +int pcf50633_irq_init(struct pcf50633 *pcf, int irq); +void pcf50633_irq_free(struct pcf50633 *pcf); +#ifdef CONFIG_PM +int pcf50633_irq_suspend(struct pcf50633 *pcf); +int pcf50633_irq_resume(struct 
pcf50633 *pcf); +#endif static int __pcf50633_read(struct pcf50633 *pcf, u8 reg, int num, u8 *data) { @@ -215,244 +215,6 @@ static struct attribute_group pcf_attr_group = { .attrs = pcf_sysfs_entries, }; -int pcf50633_register_irq(struct pcf50633 *pcf, int irq, - void (*handler) (int, void *), void *data) -{ - if (irq < 0 || irq > PCF50633_NUM_IRQ || !handler) - return -EINVAL; - - if (WARN_ON(pcf->irq_handler[irq].handler)) - return -EBUSY; - - mutex_lock(&pcf->lock); - pcf->irq_handler[irq].handler = handler; - pcf->irq_handler[irq].data = data; - mutex_unlock(&pcf->lock); - - return 0; -} -EXPORT_SYMBOL_GPL(pcf50633_register_irq); - -int pcf50633_free_irq(struct pcf50633 *pcf, int irq) -{ - if (irq < 0 || irq > PCF50633_NUM_IRQ) - return -EINVAL; - - mutex_lock(&pcf->lock); - pcf->irq_handler[irq].handler = NULL; - mutex_unlock(&pcf->lock); - - return 0; -} -EXPORT_SYMBOL_GPL(pcf50633_free_irq); - -static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask) -{ - u8 reg, bits, tmp; - int ret = 0, idx; - - idx = irq >> 3; - reg = PCF50633_REG_INT1M + idx; - bits = 1 << (irq & 0x07); - - mutex_lock(&pcf->lock); - - if (mask) { - ret = __pcf50633_read(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - tmp |= bits; - - ret = __pcf50633_write(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - pcf->mask_regs[idx] &= ~bits; - pcf->mask_regs[idx] |= bits; - } else { - ret = __pcf50633_read(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - tmp &= ~bits; - - ret = __pcf50633_write(pcf, reg, 1, &tmp); - if (ret < 0) - goto out; - - pcf->mask_regs[idx] &= ~bits; - } -out: - mutex_unlock(&pcf->lock); - - return ret; -} - -int pcf50633_irq_mask(struct pcf50633 *pcf, int irq) -{ - dev_dbg(pcf->dev, "Masking IRQ %d\n", irq); - - return __pcf50633_irq_mask_set(pcf, irq, 1); -} -EXPORT_SYMBOL_GPL(pcf50633_irq_mask); - -int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq) -{ - dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq); - - return __pcf50633_irq_mask_set(pcf, irq, 0); -} -EXPORT_SYMBOL_GPL(pcf50633_irq_unmask); - -int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq) -{ - u8 reg, bits; - - reg = irq >> 3; - bits = 1 << (irq & 0x07); - - return pcf->mask_regs[reg] & bits; -} -EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get); - -static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq) -{ - if (pcf->irq_handler[irq].handler) - pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data); -} - -/* Maximum amount of time ONKEY is held before emergency action is taken */ -#define PCF50633_ONKEY1S_TIMEOUT 8 - -static void pcf50633_irq_worker(struct work_struct *work) -{ - struct pcf50633 *pcf; - int ret, i, j; - u8 pcf_int[5], chgstat; - - pcf = container_of(work, struct pcf50633, irq_work); - - /* Read the 5 INT regs in one transaction */ - ret = pcf50633_read_block(pcf, PCF50633_REG_INT1, - ARRAY_SIZE(pcf_int), pcf_int); - if (ret != ARRAY_SIZE(pcf_int)) { - dev_err(pcf->dev, "Error reading INT registers\n"); - - /* - * If this doesn't ACK the interrupt to the chip, we'll be - * called once again as we're level triggered. - */ - goto out; - } - - /* defeat 8s death from lowsys on A5 */ - pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04); - - /* We immediately read the usb and adapter status. 
We thus make sure - * only of USBINS/USBREM IRQ handlers are called */ - if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) { - chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); - if (chgstat & (0x3 << 4)) - pcf_int[0] &= ~(1 << PCF50633_INT1_USBREM); - else - pcf_int[0] &= ~(1 << PCF50633_INT1_USBINS); - } - - /* Make sure only one of ADPINS or ADPREM is set */ - if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) { - chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); - if (chgstat & (0x3 << 4)) - pcf_int[0] &= ~(1 << PCF50633_INT1_ADPREM); - else - pcf_int[0] &= ~(1 << PCF50633_INT1_ADPINS); - } - - dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x " - "INT4=0x%02x INT5=0x%02x\n", pcf_int[0], - pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]); - - /* Some revisions of the chip don't have a 8s standby mode on - * ONKEY1S press. We try to manually do it in such cases. */ - if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) { - dev_info(pcf->dev, "ONKEY1S held for %d secs\n", - pcf->onkey1s_held); - if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT) - if (pcf->pdata->force_shutdown) - pcf->pdata->force_shutdown(pcf); - } - - if (pcf_int[2] & PCF50633_INT3_ONKEY1S) { - dev_info(pcf->dev, "ONKEY1S held\n"); - pcf->onkey1s_held = 1 ; - - /* Unmask IRQ_SECOND */ - pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M, - PCF50633_INT1_SECOND); - - /* Unmask IRQ_ONKEYR */ - pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M, - PCF50633_INT2_ONKEYR); - } - - if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) { - pcf->onkey1s_held = 0; - - /* Mask SECOND and ONKEYR interrupts */ - if (pcf->mask_regs[0] & PCF50633_INT1_SECOND) - pcf50633_reg_set_bit_mask(pcf, - PCF50633_REG_INT1M, - PCF50633_INT1_SECOND, - PCF50633_INT1_SECOND); - - if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR) - pcf50633_reg_set_bit_mask(pcf, - PCF50633_REG_INT2M, - PCF50633_INT2_ONKEYR, - PCF50633_INT2_ONKEYR); - } - - /* Have we just resumed ? 
*/ - if (pcf->is_suspended) { - pcf->is_suspended = 0; - - /* Set the resume reason filtering out non resumers */ - for (i = 0; i < ARRAY_SIZE(pcf_int); i++) - pcf->resume_reason[i] = pcf_int[i] & - pcf->pdata->resumers[i]; - - /* Make sure we don't pass on any ONKEY events to - * userspace now */ - pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF); - } - - for (i = 0; i < ARRAY_SIZE(pcf_int); i++) { - /* Unset masked interrupts */ - pcf_int[i] &= ~pcf->mask_regs[i]; - - for (j = 0; j < 8 ; j++) - if (pcf_int[i] & (1 << j)) - pcf50633_irq_call_handler(pcf, (i * 8) + j); - } - -out: - put_device(pcf->dev); - enable_irq(pcf->irq); -} - -static irqreturn_t pcf50633_irq(int irq, void *data) -{ - struct pcf50633 *pcf = data; - - dev_dbg(pcf->dev, "pcf50633_irq\n"); - - get_device(pcf->dev); - disable_irq_nosync(pcf->irq); - queue_work(pcf->work_queue, &pcf->irq_work); - - return IRQ_HANDLED; -} - static void pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, struct platform_device **pdev) @@ -479,70 +241,17 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name, static int pcf50633_suspend(struct i2c_client *client, pm_message_t state) { struct pcf50633 *pcf; - int ret = 0, i; - u8 res[5]; - pcf = i2c_get_clientdata(client); - /* Make sure our interrupt handlers are not called - * henceforth */ - disable_irq(pcf->irq); - - /* Make sure that any running IRQ worker has quit */ - cancel_work_sync(&pcf->irq_work); - - /* Save the masks */ - ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(pcf->suspend_irq_masks), - pcf->suspend_irq_masks); - if (ret < 0) { - dev_err(pcf->dev, "error saving irq masks\n"); - goto out; - } - - /* Write wakeup irq masks */ - for (i = 0; i < ARRAY_SIZE(res); i++) - res[i] = ~pcf->pdata->resumers[i]; - - ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(res), &res[0]); - if (ret < 0) { - dev_err(pcf->dev, "error writing wakeup irq masks\n"); - goto out; - } - - pcf->is_suspended = 1; - -out: - return ret; + return pcf50633_irq_suspend(pcf); } static int pcf50633_resume(struct i2c_client *client) { struct pcf50633 *pcf; - int ret; - pcf = i2c_get_clientdata(client); - /* Write the saved mask registers */ - ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, - ARRAY_SIZE(pcf->suspend_irq_masks), - pcf->suspend_irq_masks); - if (ret < 0) - dev_err(pcf->dev, "Error restoring saved suspend masks\n"); - - /* Restore regulators' state */ - - - get_device(pcf->dev); - - /* - * Clear any pending interrupts and set resume reason if any. 
- * This will leave with enable_irq() - */ - pcf50633_irq_worker(&pcf->irq_work); - - return 0; + return pcf50633_irq_resume(pcf); } #else #define pcf50633_suspend NULL @@ -573,43 +282,19 @@ static int __devinit pcf50633_probe(struct i2c_client *client, i2c_set_clientdata(client, pcf); pcf->dev = &client->dev; pcf->i2c_client = client; - pcf->irq = client->irq; - pcf->work_queue = create_singlethread_workqueue("pcf50633"); - - if (!pcf->work_queue) { - dev_err(&client->dev, "Failed to alloc workqueue\n"); - ret = -ENOMEM; - goto err_free; - } - - INIT_WORK(&pcf->irq_work, pcf50633_irq_worker); version = pcf50633_reg_read(pcf, 0); variant = pcf50633_reg_read(pcf, 1); if (version < 0 || variant < 0) { dev_err(pcf->dev, "Unable to probe pcf50633\n"); ret = -ENODEV; - goto err_destroy_workqueue; + goto err_free; } dev_info(pcf->dev, "Probed device version %d variant %d\n", version, variant); - /* Enable all interrupts except RTC SECOND */ - pcf->mask_regs[0] = 0x80; - pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]); - pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00); - pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00); - - ret = request_irq(client->irq, pcf50633_irq, - IRQF_TRIGGER_LOW, "pcf50633", pcf); - - if (ret) { - dev_err(pcf->dev, "Failed to request IRQ %d\n", ret); - goto err_destroy_workqueue; - } + pcf50633_irq_init(pcf, client->irq); /* Create sub devices */ pcf50633_client_dev_register(pcf, "pcf50633-input", @@ -620,6 +305,9 @@ static int __devinit pcf50633_probe(struct i2c_client *client, &pcf->mbc_pdev); pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev); + pcf50633_client_dev_register(pcf, "pcf50633-backlight", + &pcf->bl_pdev); + for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { struct platform_device *pdev; @@ -638,10 +326,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client, platform_device_add(pdev); } - if (enable_irq_wake(client->irq) < 0) - dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source" - "in this hardware revision", client->irq); - ret = sysfs_create_group(&client->dev.kobj, &pcf_attr_group); if (ret) dev_err(pcf->dev, "error creating sysfs entries\n"); @@ -651,8 +335,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client, return 0; -err_destroy_workqueue: - destroy_workqueue(pcf->work_queue); err_free: i2c_set_clientdata(client, NULL); kfree(pcf); @@ -665,8 +347,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) struct pcf50633 *pcf = i2c_get_clientdata(client); int i; - free_irq(pcf->irq, pcf); - destroy_workqueue(pcf->work_queue); + pcf50633_irq_free(pcf); platform_device_unregister(pcf->input_pdev); platform_device_unregister(pcf->rtc_pdev); @@ -676,6 +357,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client) for (i = 0; i < PCF50633_NUM_REGULATORS; i++) platform_device_unregister(pcf->regulator_pdev[i]); + i2c_set_clientdata(client, NULL); kfree(pcf); return 0; diff --git a/drivers/mfd/pcf50633-irq.c b/drivers/mfd/pcf50633-irq.c new file mode 100644 index 000000000000..1b0192f1efff --- /dev/null +++ b/drivers/mfd/pcf50633-irq.c @@ -0,0 +1,318 @@ +/* NXP PCF50633 Power Management Unit (PMU) driver + * + * (C) 2006-2008 by Openmoko, Inc. + * Author: Harald Welte + * Balaji Rao + * All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include + +#include + +/* Two MBCS registers used during cold start */ +#define PCF50633_REG_MBCS1 0x4b +#define PCF50633_REG_MBCS2 0x4c +#define PCF50633_MBCS1_USBPRES 0x01 +#define PCF50633_MBCS1_ADAPTPRES 0x01 + +int pcf50633_register_irq(struct pcf50633 *pcf, int irq, + void (*handler) (int, void *), void *data) +{ + if (irq < 0 || irq >= PCF50633_NUM_IRQ || !handler) + return -EINVAL; + + if (WARN_ON(pcf->irq_handler[irq].handler)) + return -EBUSY; + + mutex_lock(&pcf->lock); + pcf->irq_handler[irq].handler = handler; + pcf->irq_handler[irq].data = data; + mutex_unlock(&pcf->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(pcf50633_register_irq); + +int pcf50633_free_irq(struct pcf50633 *pcf, int irq) +{ + if (irq < 0 || irq >= PCF50633_NUM_IRQ) + return -EINVAL; + + mutex_lock(&pcf->lock); + pcf->irq_handler[irq].handler = NULL; + mutex_unlock(&pcf->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(pcf50633_free_irq); + +static int __pcf50633_irq_mask_set(struct pcf50633 *pcf, int irq, u8 mask) +{ + u8 reg, bit; + int ret = 0, idx; + + idx = irq >> 3; + reg = PCF50633_REG_INT1M + idx; + bit = 1 << (irq & 0x07); + + pcf50633_reg_set_bit_mask(pcf, reg, bit, mask ? bit : 0); + + mutex_lock(&pcf->lock); + + if (mask) + pcf->mask_regs[idx] |= bit; + else + pcf->mask_regs[idx] &= ~bit; + + mutex_unlock(&pcf->lock); + + return ret; +} + +int pcf50633_irq_mask(struct pcf50633 *pcf, int irq) +{ + dev_dbg(pcf->dev, "Masking IRQ %d\n", irq); + + return __pcf50633_irq_mask_set(pcf, irq, 1); +} +EXPORT_SYMBOL_GPL(pcf50633_irq_mask); + +int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq) +{ + dev_dbg(pcf->dev, "Unmasking IRQ %d\n", irq); + + return __pcf50633_irq_mask_set(pcf, irq, 0); +} +EXPORT_SYMBOL_GPL(pcf50633_irq_unmask); + +int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq) +{ + u8 reg, bits; + + reg = irq >> 3; + bits = 1 << (irq & 0x07); + + return pcf->mask_regs[reg] & bits; +} +EXPORT_SYMBOL_GPL(pcf50633_irq_mask_get); + +static void pcf50633_irq_call_handler(struct pcf50633 *pcf, int irq) +{ + if (pcf->irq_handler[irq].handler) + pcf->irq_handler[irq].handler(irq, pcf->irq_handler[irq].data); +} + +/* Maximum amount of time ONKEY is held before emergency action is taken */ +#define PCF50633_ONKEY1S_TIMEOUT 8 + +static irqreturn_t pcf50633_irq(int irq, void *data) +{ + struct pcf50633 *pcf = data; + int ret, i, j; + u8 pcf_int[5], chgstat; + + /* Read the 5 INT regs in one transaction */ + ret = pcf50633_read_block(pcf, PCF50633_REG_INT1, + ARRAY_SIZE(pcf_int), pcf_int); + if (ret != ARRAY_SIZE(pcf_int)) { + dev_err(pcf->dev, "Error reading INT registers\n"); + + /* + * If this doesn't ACK the interrupt to the chip, we'll be + * called once again as we're level triggered. + */ + goto out; + } + + /* defeat 8s death from lowsys on A5 */ + pcf50633_reg_write(pcf, PCF50633_REG_OOCSHDWN, 0x04); + + /* We immediately read the usb and adapter status. 
We thus make sure + * only of USBINS/USBREM IRQ handlers are called */ + if (pcf_int[0] & (PCF50633_INT1_USBINS | PCF50633_INT1_USBREM)) { + chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + if (chgstat & (0x3 << 4)) + pcf_int[0] &= ~PCF50633_INT1_USBREM; + else + pcf_int[0] &= ~PCF50633_INT1_USBINS; + } + + /* Make sure only one of ADPINS or ADPREM is set */ + if (pcf_int[0] & (PCF50633_INT1_ADPINS | PCF50633_INT1_ADPREM)) { + chgstat = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + if (chgstat & (0x3 << 4)) + pcf_int[0] &= ~PCF50633_INT1_ADPREM; + else + pcf_int[0] &= ~PCF50633_INT1_ADPINS; + } + + dev_dbg(pcf->dev, "INT1=0x%02x INT2=0x%02x INT3=0x%02x " + "INT4=0x%02x INT5=0x%02x\n", pcf_int[0], + pcf_int[1], pcf_int[2], pcf_int[3], pcf_int[4]); + + /* Some revisions of the chip don't have a 8s standby mode on + * ONKEY1S press. We try to manually do it in such cases. */ + if ((pcf_int[0] & PCF50633_INT1_SECOND) && pcf->onkey1s_held) { + dev_info(pcf->dev, "ONKEY1S held for %d secs\n", + pcf->onkey1s_held); + if (pcf->onkey1s_held++ == PCF50633_ONKEY1S_TIMEOUT) + if (pcf->pdata->force_shutdown) + pcf->pdata->force_shutdown(pcf); + } + + if (pcf_int[2] & PCF50633_INT3_ONKEY1S) { + dev_info(pcf->dev, "ONKEY1S held\n"); + pcf->onkey1s_held = 1 ; + + /* Unmask IRQ_SECOND */ + pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT1M, + PCF50633_INT1_SECOND); + + /* Unmask IRQ_ONKEYR */ + pcf50633_reg_clear_bits(pcf, PCF50633_REG_INT2M, + PCF50633_INT2_ONKEYR); + } + + if ((pcf_int[1] & PCF50633_INT2_ONKEYR) && pcf->onkey1s_held) { + pcf->onkey1s_held = 0; + + /* Mask SECOND and ONKEYR interrupts */ + if (pcf->mask_regs[0] & PCF50633_INT1_SECOND) + pcf50633_reg_set_bit_mask(pcf, + PCF50633_REG_INT1M, + PCF50633_INT1_SECOND, + PCF50633_INT1_SECOND); + + if (pcf->mask_regs[1] & PCF50633_INT2_ONKEYR) + pcf50633_reg_set_bit_mask(pcf, + PCF50633_REG_INT2M, + PCF50633_INT2_ONKEYR, + PCF50633_INT2_ONKEYR); + } + + /* Have we just resumed ? 
*/ + if (pcf->is_suspended) { + pcf->is_suspended = 0; + + /* Set the resume reason filtering out non resumers */ + for (i = 0; i < ARRAY_SIZE(pcf_int); i++) + pcf->resume_reason[i] = pcf_int[i] & + pcf->pdata->resumers[i]; + + /* Make sure we don't pass on any ONKEY events to + * userspace now */ + pcf_int[1] &= ~(PCF50633_INT2_ONKEYR | PCF50633_INT2_ONKEYF); + } + + for (i = 0; i < ARRAY_SIZE(pcf_int); i++) { + /* Unset masked interrupts */ + pcf_int[i] &= ~pcf->mask_regs[i]; + + for (j = 0; j < 8 ; j++) + if (pcf_int[i] & (1 << j)) + pcf50633_irq_call_handler(pcf, (i * 8) + j); + } + +out: + return IRQ_HANDLED; +} + +#ifdef CONFIG_PM + +int pcf50633_irq_suspend(struct pcf50633 *pcf) +{ + int ret; + int i; + u8 res[5]; + + + /* Make sure our interrupt handlers are not called + * henceforth */ + disable_irq(pcf->irq); + + /* Save the masks */ + ret = pcf50633_read_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(pcf->suspend_irq_masks), + pcf->suspend_irq_masks); + if (ret < 0) { + dev_err(pcf->dev, "error saving irq masks\n"); + goto out; + } + + /* Write wakeup irq masks */ + for (i = 0; i < ARRAY_SIZE(res); i++) + res[i] = ~pcf->pdata->resumers[i]; + + ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(res), &res[0]); + if (ret < 0) { + dev_err(pcf->dev, "error writing wakeup irq masks\n"); + goto out; + } + + pcf->is_suspended = 1; + +out: + return ret; +} + +int pcf50633_irq_resume(struct pcf50633 *pcf) +{ + int ret; + + /* Write the saved mask registers */ + ret = pcf50633_write_block(pcf, PCF50633_REG_INT1M, + ARRAY_SIZE(pcf->suspend_irq_masks), + pcf->suspend_irq_masks); + if (ret < 0) + dev_err(pcf->dev, "Error restoring saved suspend masks\n"); + + enable_irq(pcf->irq); + + return ret; +} + +#endif + +int pcf50633_irq_init(struct pcf50633 *pcf, int irq) +{ + int ret; + + pcf->irq = irq; + + /* Enable all interrupts except RTC SECOND */ + pcf->mask_regs[0] = 0x80; + pcf50633_reg_write(pcf, PCF50633_REG_INT1M, pcf->mask_regs[0]); + pcf50633_reg_write(pcf, PCF50633_REG_INT2M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT3M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT4M, 0x00); + pcf50633_reg_write(pcf, PCF50633_REG_INT5M, 0x00); + + ret = request_threaded_irq(irq, NULL, pcf50633_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "pcf50633", pcf); + + if (ret) + dev_err(pcf->dev, "Failed to request IRQ %d\n", ret); + + if (enable_irq_wake(irq) < 0) + dev_err(pcf->dev, "IRQ %u cannot be enabled as wake-up source" + "in this hardware revision", irq); + + return ret; +} + +void pcf50633_irq_free(struct pcf50633 *pcf) +{ + free_irq(pcf->irq, pcf); +} diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c new file mode 100644 index 000000000000..50922975bda3 --- /dev/null +++ b/drivers/mfd/rdc321x-southbridge.c @@ -0,0 +1,123 @@ +/* + * RDC321x MFD southbrige driver + * + * Copyright (C) 2007-2010 Florian Fainelli + * Copyright (C) 2010 Bernhard Loos + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ +#include +#include +#include +#include +#include +#include +#include + +static struct rdc321x_wdt_pdata rdc321x_wdt_pdata; + +static struct resource rdc321x_wdt_resource[] = { + { + .name = "wdt-reg", + .start = RDC321X_WDT_CTRL, + .end = RDC321X_WDT_CTRL + 0x3, + .flags = IORESOURCE_IO, + } +}; + +static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = { + .max_gpios = RDC321X_MAX_GPIO, +}; + +static struct resource rdc321x_gpio_resources[] = { + { + .name = "gpio-reg1", + .start = RDC321X_GPIO_CTRL_REG1, + .end = RDC321X_GPIO_CTRL_REG1 + 0x7, + .flags = IORESOURCE_IO, + }, { + .name = "gpio-reg2", + .start = RDC321X_GPIO_CTRL_REG2, + .end = RDC321X_GPIO_CTRL_REG2 + 0x7, + .flags = IORESOURCE_IO, + } +}; + +static struct mfd_cell rdc321x_sb_cells[] = { + { + .name = "rdc321x-wdt", + .resources = rdc321x_wdt_resource, + .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), + .driver_data = &rdc321x_wdt_pdata, + }, { + .name = "rdc321x-gpio", + .resources = rdc321x_gpio_resources, + .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), + .driver_data = &rdc321x_gpio_pdata, + }, +}; + +static int __devinit rdc321x_sb_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable device\n"); + return err; + } + + rdc321x_gpio_pdata.sb_pdev = pdev; + rdc321x_wdt_pdata.sb_pdev = pdev; + + return mfd_add_devices(&pdev->dev, -1, + rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0); +} + +static void __devexit rdc321x_sb_remove(struct pci_dev *pdev) +{ + mfd_remove_devices(&pdev->dev); +} + +static DEFINE_PCI_DEVICE_TABLE(rdc321x_sb_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) }, + {} +}; + +static struct pci_driver rdc321x_sb_driver = { + .name = "RDC321x Southbridge", + .id_table = rdc321x_sb_table, + .probe = rdc321x_sb_probe, + .remove = __devexit_p(rdc321x_sb_remove), +}; + +static int __init rdc321x_sb_init(void) +{ + return pci_register_driver(&rdc321x_sb_driver); +} + +static void __exit rdc321x_sb_exit(void) +{ + pci_unregister_driver(&rdc321x_sb_driver); +} + +module_init(rdc321x_sb_init); +module_exit(rdc321x_sb_exit); + +MODULE_AUTHOR("Florian Fainelli "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver"); diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index da6383a934ac..5041d33adf0b 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c @@ -318,6 +318,9 @@ static int t7l66xb_probe(struct platform_device *dev) struct resource *iomem, *rscr; int ret; + if (pdata == NULL) + return -EINVAL; + iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; diff --git a/drivers/mfd/tc35892.c b/drivers/mfd/tc35892.c new file mode 100644 index 000000000000..715f095dd7a6 --- /dev/null +++ b/drivers/mfd/tc35892.c @@ -0,0 +1,347 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + * Author: Hanumath Prasad for ST-Ericsson + * Author: Rabin Vincent for ST-Ericsson + */ + +#include +#include +#include +#include +#include +#include +#include + +/** + * tc35892_reg_read() - read a single TC35892 register + * @tc35892: Device to read from + * @reg: Register to read + */ +int tc35892_reg_read(struct tc35892 *tc35892, u8 reg) +{ + int ret; + + ret 
= i2c_smbus_read_byte_data(tc35892->i2c, reg); + if (ret < 0) + dev_err(tc35892->dev, "failed to read reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_read); + +/** + * tc35892_reg_write() - write a single TC35892 register + * @tc35892: Device to write to + * @reg: Register to write + * @data: Value to write + */ +int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data) +{ + int ret; + + ret = i2c_smbus_write_byte_data(tc35892->i2c, reg, data); + if (ret < 0) + dev_err(tc35892->dev, "failed to write reg %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_reg_write); + +/** + * tc35892_block_read() - read multiple TC35892 registers + * @tc35892: Device to read from + * @reg: First register + * @length: Number of registers + * @values: Buffer to write to + */ +int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, u8 *values) +{ + int ret; + + ret = i2c_smbus_read_i2c_block_data(tc35892->i2c, reg, length, values); + if (ret < 0) + dev_err(tc35892->dev, "failed to read regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_read); + +/** + * tc35892_block_write() - write multiple TC35892 registers + * @tc35892: Device to write to + * @reg: First register + * @length: Number of registers + * @values: Values to write + */ +int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length, + const u8 *values) +{ + int ret; + + ret = i2c_smbus_write_i2c_block_data(tc35892->i2c, reg, length, + values); + if (ret < 0) + dev_err(tc35892->dev, "failed to write regs %#x: %d\n", + reg, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_block_write); + +/** + * tc35892_set_bits() - set the value of a bitfield in a TC35892 register + * @tc35892: Device to write to + * @reg: Register to write + * @mask: Mask of bits to set + * @val: Value to set + */ +int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val) +{ + int ret; + + mutex_lock(&tc35892->lock); + + ret = tc35892_reg_read(tc35892, reg); + if (ret < 0) + goto out; + + ret &= ~mask; + ret |= val; + + ret = tc35892_reg_write(tc35892, reg, ret); + +out: + mutex_unlock(&tc35892->lock); + return ret; +} +EXPORT_SYMBOL_GPL(tc35892_set_bits); + +static struct resource gpio_resources[] = { + { + .start = TC35892_INT_GPIIRQ, + .end = TC35892_INT_GPIIRQ, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell tc35892_devs[] = { + { + .name = "tc35892-gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = &gpio_resources[0], + }, +}; + +static irqreturn_t tc35892_irq(int irq, void *data) +{ + struct tc35892 *tc35892 = data; + int status; + + status = tc35892_reg_read(tc35892, TC35892_IRQST); + if (status < 0) + return IRQ_NONE; + + while (status) { + int bit = __ffs(status); + + handle_nested_irq(tc35892->irq_base + bit); + status &= ~(1 << bit); + } + + /* + * A dummy read or write (to any register) appears to be necessary to + * have the last interrupt clear (for example, GPIO IC write) take + * effect.
+ */ + tc35892_reg_read(tc35892, TC35892_IRQST); + + return IRQ_HANDLED; +} + +static void tc35892_irq_dummy(unsigned int irq) +{ + /* No mask/unmask at this level */ +} + +static struct irq_chip tc35892_irq_chip = { + .name = "tc35892", + .mask = tc35892_irq_dummy, + .unmask = tc35892_irq_dummy, +}; + +static int tc35892_irq_init(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { + set_irq_chip_data(irq, tc35892); + set_irq_chip_and_handler(irq, &tc35892_irq_chip, + handle_edge_irq); + set_irq_nested_thread(irq, 1); +#ifdef CONFIG_ARM + set_irq_flags(irq, IRQF_VALID); +#else + set_irq_noprobe(irq); +#endif + } + + return 0; +} + +static void tc35892_irq_remove(struct tc35892 *tc35892) +{ + int base = tc35892->irq_base; + int irq; + + for (irq = base; irq < base + TC35892_NR_INTERNAL_IRQS; irq++) { +#ifdef CONFIG_ARM + set_irq_flags(irq, 0); +#endif + set_irq_chip_and_handler(irq, NULL, NULL); + set_irq_chip_data(irq, NULL); + } +} + +static int tc35892_chip_init(struct tc35892 *tc35892) +{ + int manf, ver, ret; + + manf = tc35892_reg_read(tc35892, TC35892_MANFCODE); + if (manf < 0) + return manf; + + ver = tc35892_reg_read(tc35892, TC35892_VERSION); + if (ver < 0) + return ver; + + if (manf != TC35892_MANFCODE_MAGIC) { + dev_err(tc35892->dev, "unknown manufacturer: %#x\n", manf); + return -EINVAL; + } + + dev_info(tc35892->dev, "manufacturer: %#x, version: %#x\n", manf, ver); + + /* Put everything except the IRQ module into reset */ + ret = tc35892_reg_write(tc35892, TC35892_RSTCTRL, + TC35892_RSTCTRL_TIMRST + | TC35892_RSTCTRL_ROTRST + | TC35892_RSTCTRL_KBDRST + | TC35892_RSTCTRL_GPIRST); + if (ret < 0) + return ret; + + /* Clear the reset interrupt. */ + return tc35892_reg_write(tc35892, TC35892_RSTINTCLR, 0x1); +} + +static int __devinit tc35892_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct tc35892_platform_data *pdata = i2c->dev.platform_data; + struct tc35892 *tc35892; + int ret; + + if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_I2C_BLOCK)) + return -EIO; + + tc35892 = kzalloc(sizeof(struct tc35892), GFP_KERNEL); + if (!tc35892) + return -ENOMEM; + + mutex_init(&tc35892->lock); + + tc35892->dev = &i2c->dev; + tc35892->i2c = i2c; + tc35892->pdata = pdata; + tc35892->irq_base = pdata->irq_base; + tc35892->num_gpio = id->driver_data; + + i2c_set_clientdata(i2c, tc35892); + + ret = tc35892_chip_init(tc35892); + if (ret) + goto out_free; + + ret = tc35892_irq_init(tc35892); + if (ret) + goto out_free; + + ret = request_threaded_irq(tc35892->i2c->irq, NULL, tc35892_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "tc35892", tc35892); + if (ret) { + dev_err(tc35892->dev, "failed to request IRQ: %d\n", ret); + goto out_removeirq; + } + + ret = mfd_add_devices(tc35892->dev, -1, tc35892_devs, + ARRAY_SIZE(tc35892_devs), NULL, + tc35892->irq_base); + if (ret) { + dev_err(tc35892->dev, "failed to add children\n"); + goto out_freeirq; + } + + return 0; + +out_freeirq: + free_irq(tc35892->i2c->irq, tc35892); +out_removeirq: + tc35892_irq_remove(tc35892); +out_free: + i2c_set_clientdata(i2c, NULL); + kfree(tc35892); + return ret; +} + +static int __devexit tc35892_remove(struct i2c_client *client) +{ + struct tc35892 *tc35892 = i2c_get_clientdata(client); + + mfd_remove_devices(tc35892->dev); + + free_irq(tc35892->i2c->irq, tc35892); + tc35892_irq_remove(tc35892); + + i2c_set_clientdata(client, NULL); + kfree(tc35892); + + return 0; +} + 
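(For orientation, a minimal sketch of how a child driver of this MFD core might use the register helpers documented above, tc35892_reg_read() and tc35892_set_bits(); the register name TC35892_EXAMPLE_REG and the wrapper example_update() are hypothetical and only illustrate the read and read-modify-write patterns.)

/* Illustrative sketch only; TC35892_EXAMPLE_REG is a hypothetical register. */
#define TC35892_EXAMPLE_REG 0x20

static int example_update(struct tc35892 *tc35892, u8 bits)
{
	int val;

	/* Single-register read; a negative return value is an I2C error code */
	val = tc35892_reg_read(tc35892, TC35892_EXAMPLE_REG);
	if (val < 0)
		return val;

	/* Mutex-protected read-modify-write of the selected bits */
	return tc35892_set_bits(tc35892, TC35892_EXAMPLE_REG, bits, bits);
}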
+static const struct i2c_device_id tc35892_id[] = { + { "tc35892", 24 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tc35892_id); + +static struct i2c_driver tc35892_driver = { + .driver.name = "tc35892", + .driver.owner = THIS_MODULE, + .probe = tc35892_probe, + .remove = __devexit_p(tc35892_remove), + .id_table = tc35892_id, +}; + +static int __init tc35892_init(void) +{ + return i2c_add_driver(&tc35892_driver); +} +subsys_initcall(tc35892_init); + +static void __exit tc35892_exit(void) +{ + i2c_del_driver(&tc35892_driver); +} +module_exit(tc35892_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TC35892 MFD core driver"); +MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent"); diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index 7f478ec4184b..ac5995026c88 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -40,6 +41,8 @@ #include +#include + #include "timberdale.h" #define DRIVER_NAME "timberdale" @@ -69,6 +72,12 @@ static struct i2c_board_info timberdale_i2c_board_info[] = { }, }; +static __devinitdata struct xiic_i2c_platform_data +timberdale_xiic_platform_data = { + .devices = timberdale_i2c_board_info, + .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) +}; + static __devinitdata struct ocores_i2c_platform_data timberdale_ocores_platform_data = { .regstep = 4, @@ -77,7 +86,20 @@ timberdale_ocores_platform_data = { .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) }; -const static __devinitconst struct resource timberdale_ocores_resources[] = { +static const __devinitconst struct resource timberdale_xiic_resources[] = { + { + .start = XIICOFFSET, + .end = XIICEND, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_TIMBERDALE_I2C, + .end = IRQ_TIMBERDALE_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +static const __devinitconst struct resource timberdale_ocores_resources[] = { { .start = OCORESOFFSET, .end = OCORESEND, @@ -126,7 +148,7 @@ static __devinitdata struct xspi_platform_data timberdale_xspi_platform_data = { */ }; -const static __devinitconst struct resource timberdale_spi_resources[] = { +static const __devinitconst struct resource timberdale_spi_resources[] = { { .start = SPIOFFSET, .end = SPIEND, @@ -139,7 +161,7 @@ const static __devinitconst struct resource timberdale_spi_resources[] = { }, }; -const static __devinitconst struct resource timberdale_eth_resources[] = { +static const __devinitconst struct resource timberdale_eth_resources[] = { { .start = ETHOFFSET, .end = ETHEND, @@ -159,7 +181,7 @@ static __devinitdata struct timbgpio_platform_data .irq_base = 200, }; -const static __devinitconst struct resource timberdale_gpio_resources[] = { +static const __devinitconst struct resource timberdale_gpio_resources[] = { { .start = GPIOOFFSET, .end = GPIOEND, @@ -172,7 +194,7 @@ const static __devinitconst struct resource timberdale_gpio_resources[] = { }, }; -const static __devinitconst struct resource timberdale_mlogicore_resources[] = { +static const __devinitconst struct resource timberdale_mlogicore_resources[] = { { .start = MLCOREOFFSET, .end = MLCOREEND, @@ -190,7 +212,7 @@ const static __devinitconst struct resource timberdale_mlogicore_resources[] = { }, }; -const static __devinitconst struct resource timberdale_uart_resources[] = { +static const __devinitconst struct resource timberdale_uart_resources[] = { { .start = UARTOFFSET, .end = UARTEND, @@ -203,7 +225,7 @@ const static __devinitconst struct resource timberdale_uart_resources[] = { }, }; -const static 
__devinitconst struct resource timberdale_uartlite_resources[] = { +static const __devinitconst struct resource timberdale_uartlite_resources[] = { { .start = UARTLITEOFFSET, .end = UARTLITEEND, @@ -216,7 +238,7 @@ const static __devinitconst struct resource timberdale_uartlite_resources[] = { }, }; -const static __devinitconst struct resource timberdale_radio_resources[] = { +static const __devinitconst struct resource timberdale_radio_resources[] = { { .start = RDSOFFSET, .end = RDSEND, @@ -250,7 +272,66 @@ static __devinitdata struct timb_radio_platform_data } }; -const static __devinitconst struct resource timberdale_dma_resources[] = { +static __devinitdata struct timb_dma_platform_data timb_dma_platform_data = { + .nr_channels = 10, + .channels = { + { + /* UART RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* UART TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* MLB RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* MLB TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* Video RX */ + .rx = true, + .bytes_per_line = 1440, + .descriptors = 2, + .descriptor_elements = 16 + }, + { + /* Video framedrop */ + }, + { + /* SDHCI RX */ + .rx = true, + }, + { + /* SDHCI TX */ + }, + { + /* ETH RX */ + .rx = true, + .descriptors = 2, + .descriptor_elements = 1 + }, + { + /* ETH TX */ + .rx = false, + .descriptors = 2, + .descriptor_elements = 1 + }, + } +}; + +static const __devinitconst struct resource timberdale_dma_resources[] = { { .start = DMAOFFSET, .end = DMAEND, @@ -264,11 +345,25 @@ const static __devinitconst struct resource timberdale_dma_resources[] = { }; static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { + { + .name = "timb-dma", + .num_resources = ARRAY_SIZE(timberdale_dma_resources), + .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), + }, { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), .resources = timberdale_uart_resources, }, + { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = &timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, { .name = "timb-gpio", .num_resources = ARRAY_SIZE(timberdale_gpio_resources), @@ -295,14 +390,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), @@ -313,6 +410,13 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { .num_resources = ARRAY_SIZE(timberdale_uartlite_resources), .resources = timberdale_uartlite_resources, }, + { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = &timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, { .name = "timb-gpio", .num_resources = 
ARRAY_SIZE(timberdale_gpio_resources), @@ -344,19 +448,28 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), .resources = timberdale_uart_resources, }, + { + .name = "xiic-i2c", + .num_resources = ARRAY_SIZE(timberdale_xiic_resources), + .resources = timberdale_xiic_resources, + .platform_data = &timberdale_xiic_platform_data, + .data_size = sizeof(timberdale_xiic_platform_data), + }, { .name = "timb-gpio", .num_resources = ARRAY_SIZE(timberdale_gpio_resources), @@ -378,14 +491,16 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { .platform_data = &timberdale_xspi_platform_data, .data_size = sizeof(timberdale_xspi_platform_data), }, +}; + +static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { { .name = "timb-dma", .num_resources = ARRAY_SIZE(timberdale_dma_resources), .resources = timberdale_dma_resources, + .platform_data = &timb_dma_platform_data, + .data_size = sizeof(timb_dma_platform_data), }, -}; - -static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { { .name = "timb-uart", .num_resources = ARRAY_SIZE(timberdale_uart_resources), @@ -424,11 +539,6 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { .num_resources = ARRAY_SIZE(timberdale_eth_resources), .resources = timberdale_eth_resources, }, - { - .name = "timb-dma", - .num_resources = ARRAY_SIZE(timberdale_dma_resources), - .resources = timberdale_dma_resources, - }, }; static const __devinitconst struct resource timberdale_sdhc_resources[] = { diff --git a/drivers/mfd/timberdale.h b/drivers/mfd/timberdale.h index 8d27ffabc25d..c11bf6ebfe00 100644 --- a/drivers/mfd/timberdale.h +++ b/drivers/mfd/timberdale.h @@ -23,7 +23,7 @@ #ifndef MFD_TIMBERDALE_H #define MFD_TIMBERDALE_H -#define DRV_VERSION "0.1" +#define DRV_VERSION "0.2" /* This driver only support versions >= 3.8 and < 4.0 */ #define TIMB_SUPPORTED_MAJOR 3 @@ -66,7 +66,7 @@ #define CHIPCTLOFFSET 0x800 #define CHIPCTLEND 0x8ff -#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET) +#define CHIPCTLSIZE (CHIPCTLEND - CHIPCTLOFFSET + 1) #define INTCOFFSET 0xc00 #define INTCEND 0xfff @@ -127,4 +127,16 @@ #define GPIO_PIN_BT_RST 15 #define GPIO_NR_PINS 16 +/* DMA Channels */ +#define DMA_UART_RX 0 +#define DMA_UART_TX 1 +#define DMA_MLB_RX 2 +#define DMA_MLB_TX 3 +#define DMA_VIDEO_RX 4 +#define DMA_VIDEO_DROP 5 +#define DMA_SDHCI_RX 6 +#define DMA_SDHCI_TX 7 +#define DMA_ETH_RX 8 +#define DMA_ETH_TX 9 + #endif diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index e5955306c2fa..9b22a77f70f5 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -530,8 +530,8 @@ static int __exit tps65010_remove(struct i2c_client *client) cancel_delayed_work(&tps->work); flush_scheduled_work(); debugfs_remove(tps->file); - kfree(tps); i2c_set_clientdata(client, NULL); + kfree(tps); the_tps = NULL; return 0; } diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c new file mode 100644 index 000000000000..d859dffed39f --- /dev/null +++ 
b/drivers/mfd/tps6507x.c @@ -0,0 +1,159 @@ +/* + * tps6507x.c -- TPS6507x chip family multi-function driver + * + * Copyright (c) 2010 RidgeRun (todd.fischer@ridgerun.com) + * + * Author: Todd Fischer + * todd.fischer@ridgerun.com + * + * Credits: + * + * Using code from wm831x-*.c, wm8400-core, Wolfson Microelectronics PLC. + * + * For licencing details see kernel-base/COPYING + * + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct mfd_cell tps6507x_devs[] = { + { + .name = "tps6507x-pmic", + }, + { + .name = "tps6507x-ts", + }, +}; + + +static int tps6507x_i2c_read_device(struct tps6507x_dev *tps6507x, char reg, + int bytes, void *dest) +{ + struct i2c_client *i2c = tps6507x->i2c_client; + struct i2c_msg xfer[2]; + int ret; + + /* Write register */ + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = 1; + xfer[0].buf = &reg; + + /* Read data */ + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = bytes; + xfer[1].buf = dest; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + ret = 0; + else if (ret >= 0) + ret = -EIO; + + return ret; +} + +static int tps6507x_i2c_write_device(struct tps6507x_dev *tps6507x, char reg, + int bytes, void *src) +{ + struct i2c_client *i2c = tps6507x->i2c_client; + /* we add 1 byte for device register */ + u8 msg[TPS6507X_MAX_REGISTER + 1]; + int ret; + + if (bytes > (TPS6507X_MAX_REGISTER + 1)) + return -EINVAL; + + msg[0] = reg; + memcpy(&msg[1], src, bytes); + + ret = i2c_master_send(i2c, msg, bytes + 1); + if (ret < 0) + return ret; + if (ret != bytes + 1) + return -EIO; + return 0; +} + +static int tps6507x_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct tps6507x_dev *tps6507x; + int ret = 0; + + tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL); + if (tps6507x == NULL) { + kfree(i2c); + return -ENOMEM; + } + + i2c_set_clientdata(i2c, tps6507x); + tps6507x->dev = &i2c->dev; + tps6507x->i2c_client = i2c; + tps6507x->read_dev = tps6507x_i2c_read_device; + tps6507x->write_dev = tps6507x_i2c_write_device; + + ret = mfd_add_devices(tps6507x->dev, -1, + tps6507x_devs, ARRAY_SIZE(tps6507x_devs), + NULL, 0); + + if (ret < 0) + goto err; + + return ret; + +err: + mfd_remove_devices(tps6507x->dev); + kfree(tps6507x); + return ret; +} + +static int tps6507x_i2c_remove(struct i2c_client *i2c) +{ + struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c); + + mfd_remove_devices(tps6507x->dev); + kfree(tps6507x); + + return 0; +} + +static const struct i2c_device_id tps6507x_i2c_id[] = { + { "tps6507x", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, tps6507x_i2c_id); + + +static struct i2c_driver tps6507x_i2c_driver = { + .driver = { + .name = "tps6507x", + .owner = THIS_MODULE, + }, + .probe = tps6507x_i2c_probe, + .remove = tps6507x_i2c_remove, + .id_table = tps6507x_i2c_id, +}; + +static int __init tps6507x_i2c_init(void) +{ + return i2c_add_driver(&tps6507x_i2c_driver); +} +/* init early so consumer devices can complete system boot */ +subsys_initcall(tps6507x_i2c_init); + +static void __exit tps6507x_i2c_exit(void) +{ + i2c_del_driver(&tps6507x_i2c_driver); +} +module_exit(tps6507x_i2c_exit); + +MODULE_DESCRIPTION("TPS6507x chip family multi-function driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 202bdd59632d..097f24d8bceb 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -232,10 +232,11 @@ static const struct sih sih_modules_twl5031[8] = { }, [6] = { /* - * ACI
doesn't use the same SIH organization. - * For example, it supports only one interrupt line + * ECI/DBI doesn't use the same SIH organization. + * For example, it supports only one interrupt output line. + * That is, the interrupts are seen on both INT1 and INT2 lines. */ - .name = "aci", + .name = "eci_dbi", .module = TWL5031_MODULE_ACCESSORY, .bits = 9, .bytes_ixr = 2, @@ -247,8 +248,8 @@ static const struct sih sih_modules_twl5031[8] = { }, [7] = { - /* Accessory */ - .name = "acc", + /* Audio accessory */ + .name = "audio", .module = TWL5031_MODULE_ACCESSORY, .control_offset = TWL5031_ACCSIHCTRL, .bits = 2, diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index f2ab025ad97a..1a968f34d679 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -322,7 +322,11 @@ EXPORT_SYMBOL_GPL(wm831x_set_bits); */ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) { - int ret, src; + int ret, src, irq_masked, timeout; + + /* Are we using the interrupt? */ + irq_masked = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_1_MASK); + irq_masked &= WM831X_AUXADC_DATA_EINT; mutex_lock(&wm831x->auxadc_lock); @@ -342,6 +346,9 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) goto out; } + /* Clear any notification from a very late arriving interrupt */ + try_wait_for_completion(&wm831x->auxadc_done); + ret = wm831x_set_bits(wm831x, WM831X_AUXADC_CONTROL, WM831X_AUX_CVT_ENA, WM831X_AUX_CVT_ENA); if (ret < 0) { @@ -349,22 +356,46 @@ int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input) goto disable; } - /* If an interrupt arrived late clean up after it */ - try_wait_for_completion(&wm831x->auxadc_done); - - /* Ignore the result to allow us to soldier on without IRQ hookup */ - wait_for_completion_timeout(&wm831x->auxadc_done, msecs_to_jiffies(5)); - - ret = wm831x_reg_read(wm831x, WM831X_AUXADC_CONTROL); - if (ret < 0) { - dev_err(wm831x->dev, "AUXADC status read failed: %d\n", ret); - goto disable; - } - - if (ret & WM831X_AUX_CVT_ENA) { - dev_err(wm831x->dev, "Timed out reading AUXADC\n"); - ret = -EBUSY; - goto disable; + if (irq_masked) { + /* If we're not using interrupts then poll the + * interrupt status register */ + timeout = 5; + while (timeout) { + msleep(1); + + ret = wm831x_reg_read(wm831x, + WM831X_INTERRUPT_STATUS_1); + if (ret < 0) { + dev_err(wm831x->dev, + "ISR 1 read failed: %d\n", ret); + goto disable; + } + + /* Did it complete? */ + if (ret & WM831X_AUXADC_DATA_EINT) { + wm831x_reg_write(wm831x, + WM831X_INTERRUPT_STATUS_1, + WM831X_AUXADC_DATA_EINT); + break; + } else { + dev_err(wm831x->dev, + "AUXADC conversion timeout\n"); + ret = -EBUSY; + goto disable; + } + } + } else { + /* If we are using interrupts then wait for the + * interrupt to complete. Use an extremely long + * timeout to handle situations with heavy load where + * the notification of the interrupt may be delayed by + * threaded IRQ handling. 
*/ + if (!wait_for_completion_timeout(&wm831x->auxadc_done, + msecs_to_jiffies(500))) { + dev_err(wm831x->dev, "Timed out waiting for AUXADC\n"); + ret = -EBUSY; + goto disable; + } } ret = wm831x_reg_read(wm831x, WM831X_AUXADC_DATA); @@ -1463,6 +1494,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8310: parent = WM8310; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1474,6 +1506,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8311: parent = WM8311; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1485,6 +1518,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8312: parent = WM8312; wm831x->num_gpio = 16; + wm831x->charger_irq_wake = 1; if (rev > 0) { wm831x->has_gpio_ena = 1; wm831x->has_cs_sts = 1; @@ -1623,6 +1657,42 @@ static void wm831x_device_exit(struct wm831x *wm831x) kfree(wm831x); } +static int wm831x_device_suspend(struct wm831x *wm831x) +{ + int reg, mask; + + /* If the charger IRQs are a wake source then make sure we ack + * them even if they're not actively being used (eg, no power + * driver or no IRQ line wired up) then acknowledge the + * interrupts otherwise suspend won't last very long. + */ + if (wm831x->charger_irq_wake) { + reg = wm831x_reg_read(wm831x, WM831X_INTERRUPT_STATUS_2_MASK); + + mask = WM831X_CHG_BATT_HOT_EINT | + WM831X_CHG_BATT_COLD_EINT | + WM831X_CHG_BATT_FAIL_EINT | + WM831X_CHG_OV_EINT | WM831X_CHG_END_EINT | + WM831X_CHG_TO_EINT | WM831X_CHG_MODE_EINT | + WM831X_CHG_START_EINT; + + /* If any of the interrupts are masked read the statuses */ + if (reg & mask) + reg = wm831x_reg_read(wm831x, + WM831X_INTERRUPT_STATUS_2); + + if (reg & mask) { + dev_info(wm831x->dev, + "Acknowledging masked charger IRQs: %x\n", + reg & mask); + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_2, + reg & mask); + } + } + + return 0; +} + static int wm831x_i2c_read_device(struct wm831x *wm831x, unsigned short reg, int bytes, void *dest) { @@ -1697,6 +1767,13 @@ static int wm831x_i2c_remove(struct i2c_client *i2c) return 0; } +static int wm831x_i2c_suspend(struct i2c_client *i2c, pm_message_t mesg) +{ + struct wm831x *wm831x = i2c_get_clientdata(i2c); + + return wm831x_device_suspend(wm831x); +} + static const struct i2c_device_id wm831x_i2c_id[] = { { "wm8310", WM8310 }, { "wm8311", WM8311 }, @@ -1714,6 +1791,7 @@ static struct i2c_driver wm831x_i2c_driver = { }, .probe = wm831x_i2c_probe, .remove = wm831x_i2c_remove, + .suspend = wm831x_i2c_suspend, .id_table = wm831x_i2c_id, }; diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 4c1122ceb443..7dabe4dbd373 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -39,8 +39,6 @@ struct wm831x_irq_data { int primary; int reg; int mask; - irq_handler_t handler; - void *handler_data; }; static struct wm831x_irq_data wm831x_irqs[] = { @@ -492,6 +490,14 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) mutex_init(&wm831x->irq_lock); + /* Mask the individual interrupt sources */ + for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { + wm831x->irq_masks_cur[i] = 0xffff; + wm831x->irq_masks_cache[i] = 0xffff; + wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, + 0xffff); + } + if (!irq) { dev_warn(wm831x->dev, "No interrupt specified - functionality limited\n"); @@ -507,14 +513,6 @@ int 
wm831x_irq_init(struct wm831x *wm831x, int irq) wm831x->irq = irq; wm831x->irq_base = pdata->irq_base; - /* Mask the individual interrupt sources */ - for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) { - wm831x->irq_masks_cur[i] = 0xffff; - wm831x->irq_masks_cache[i] = 0xffff; - wm831x_reg_write(wm831x, WM831X_INTERRUPT_STATUS_1_MASK + i, - 0xffff); - } - /* Register them with genirq */ for (cur_irq = wm831x->irq_base; cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base; diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c index 65830f57c093..7795af4b1fe1 100644 --- a/drivers/mfd/wm8350-i2c.c +++ b/drivers/mfd/wm8350-i2c.c @@ -64,10 +64,8 @@ static int wm8350_i2c_probe(struct i2c_client *i2c, int ret = 0; wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL); - if (wm8350 == NULL) { - kfree(i2c); + if (wm8350 == NULL) return -ENOMEM; - } i2c_set_clientdata(i2c, wm8350); wm8350->dev = &i2c->dev; @@ -82,6 +80,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c, return ret; err: + i2c_set_clientdata(i2c, NULL); kfree(wm8350); return ret; } @@ -91,6 +90,7 @@ static int wm8350_i2c_remove(struct i2c_client *i2c) struct wm8350 *wm8350 = i2c_get_clientdata(i2c); wm8350_device_exit(wm8350); + i2c_set_clientdata(i2c, NULL); kfree(wm8350); return 0; diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 865ce013a821..e08aafa663dc 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@ -118,7 +118,7 @@ static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest) { int i, ret = 0; - BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); + BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); /* If there are any volatile reads then read back the entire block */ for (i = reg; i < reg + num_regs; i++) @@ -144,7 +144,7 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs, { int ret, i; - BUG_ON(reg + num_regs - 1 > ARRAY_SIZE(wm8400->reg_cache)); + BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); for (i = 0; i < num_regs; i++) { BUG_ON(!reg_data[reg + i].writable); diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index 31a991161f0a..5bfb2a2041b8 100644 --- a/drivers/misc/lkdtm.c +++ b/drivers/misc/lkdtm.c @@ -75,6 +75,9 @@ enum ctype { UNALIGNED_LOAD_STORE_WRITE, OVERWRITE_ALLOCATION, WRITE_AFTER_FREE, + SOFTLOCKUP, + HARDLOCKUP, + HUNG_TASK, }; static char* cp_name[] = { @@ -99,6 +102,9 @@ static char* cp_type[] = { "UNALIGNED_LOAD_STORE_WRITE", "OVERWRITE_ALLOCATION", "WRITE_AFTER_FREE", + "SOFTLOCKUP", + "HARDLOCKUP", + "HUNG_TASK", }; static struct jprobe lkdtm; @@ -320,6 +326,20 @@ static void lkdtm_do_action(enum ctype which) memset(data, 0x78, len); break; } + case SOFTLOCKUP: + preempt_disable(); + for (;;) + cpu_relax(); + break; + case HARDLOCKUP: + local_irq_disable(); + for (;;) + cpu_relax(); + break; + case HUNG_TASK: + set_current_state(TASK_UNINTERRUPTIBLE); + schedule(); + break; case NONE: default: break; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 3168ebd616b2..569e94da844c 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1252,9 +1252,8 @@ EXPORT_SYMBOL(mmc_card_can_sleep); /** * mmc_suspend_host - suspend a host * @host: mmc host - * @state: suspend mode (PM_SUSPEND_xxx) */ -int mmc_suspend_host(struct mmc_host *host, pm_message_t state) +int mmc_suspend_host(struct mmc_host *host) { int err = 0; diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 0d96080d44b0..63772e7e7608 100644 --- 
a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -79,8 +79,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, * we cannot use the retries field in mmc_command. */ for (i = 0;i <= retries;i++) { - memset(&mrq, 0, sizeof(struct mmc_request)); - err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index ff27c8c71355..0f687cdeb064 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -405,6 +405,36 @@ void sdio_writeb(struct sdio_func *func, u8 b, unsigned int addr, int *err_ret) } EXPORT_SYMBOL_GPL(sdio_writeb); +/** + * sdio_writeb_readb - write and read a byte from SDIO function + * @func: SDIO function to access + * @write_byte: byte to write + * @addr: address to write to + * @err_ret: optional status value from transfer + * + * Performs a RAW (Read after Write) operation as defined by SDIO spec - + * single byte is written to address space of a given SDIO function and + * response is read back from the same address, both using single request. + * If there is a problem with the operation, 0xff is returned and + * @err_ret will contain the error code. + */ +u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, + unsigned int addr, int *err_ret) +{ + int ret; + u8 val; + + ret = mmc_io_rw_direct(func->card, 1, func->num, addr, + write_byte, &val); + if (err_ret) + *err_ret = ret; + if (ret) + val = 0xff; + + return val; +} +EXPORT_SYMBOL_GPL(sdio_writeb_readb); + /** * sdio_memcpy_fromio - read a chunk of memory from a SDIO function * @func: SDIO function to access diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 2e13b94769fd..e171e77f6129 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -136,6 +136,18 @@ config MMC_SDHCI_S3C If unsure, say N. +config MMC_SDHCI_SPEAR + tristate "SDHCI support on ST SPEAr platform" + depends on MMC_SDHCI && PLAT_SPEAR + help + This selects the Secure Digital Host Controller Interface (SDHCI) + often referred to as the HSMMC block in some of the ST SPEAR range + of SoCs + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_SDHCI_S3C_DMA bool "DMA support on S3C SDHCI" depends on MMC_SDHCI_S3C && EXPERIMENTAL @@ -412,3 +424,11 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND depends on SDH_BFIN help If you say yes here SD-Cards may work on the EZkit. + +config MMC_SH_MMCIF + tristate "SuperH Internal MMCIF support" + depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) + help + This selects the MMC Host Interface controller (MMCIF). + + This driver supports MMCIF in sh7724/sh7757/sh7372.
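(As an aside, a minimal usage sketch for the sdio_writeb_readb() helper added in the sdio_io.c hunk above; example_raw_write_read() and EXAMPLE_REG_ADDR are hypothetical, while sdio_claim_host()/sdio_release_host() are the usual SDIO function-driver claim and release calls.)

#include <linux/mmc/sdio_func.h>

/* Illustrative sketch only; EXAMPLE_REG_ADDR is a hypothetical register offset. */
#define EXAMPLE_REG_ADDR 0x10

static int example_raw_write_read(struct sdio_func *func, u8 value)
{
	int err;
	u8 resp;

	sdio_claim_host(func);
	/* One RAW transaction: write 'value', then read the response from the same address */
	resp = sdio_writeb_readb(func, value, EXAMPLE_REG_ADDR, &err);
	sdio_release_host(func);

	return err ? err : resp;
}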
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index f4803977dfce..e30c2ee48894 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o +obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o @@ -34,6 +35,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o +obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o sdhci-of-y := sdhci-of-core.o diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 336d9f553f3e..5f3a599ead07 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -1157,7 +1157,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) enable_irq_wake(host->board->det_pin); if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index df0e8a88d85f..95ef864ad8f9 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -173,6 +173,7 @@ struct atmel_mci { * @mmc: The mmc_host representing this slot. * @host: The MMC controller this slot is using. * @sdc_reg: Value of SDCR to be written before using this slot. + * @sdio_irq: SDIO irq mask for this slot. * @mrq: mmc_request currently being processed or waiting to be * processed, or NULL when the slot is idle. * @queue_node: List node for placing this node in the @queue list of @@ -191,6 +192,7 @@ struct atmel_mci_slot { struct atmel_mci *host; u32 sdc_reg; + u32 sdio_irq; struct mmc_request *mrq; struct list_head queue_node; @@ -792,7 +794,7 @@ static void atmci_start_request(struct atmel_mci *host, mci_writel(host, SDCR, slot->sdc_reg); iflags = mci_readl(host, IMR); - if (iflags) + if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB)) dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", iflags); @@ -952,10 +954,21 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (mci_has_rwproof()) host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); - if (list_empty(&host->queue)) + if (atmci_is_mci2()) { + /* setup High Speed mode in relation with card capacity */ + if (ios->timing == MMC_TIMING_SD_HS) + host->cfg_reg |= MCI_CFG_HSMODE; + else + host->cfg_reg &= ~MCI_CFG_HSMODE; + } + + if (list_empty(&host->queue)) { mci_writel(host, MR, host->mode_reg); - else + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); + } else { host->need_clock_update = true; + } spin_unlock_bh(&host->lock); } else { @@ -1030,11 +1043,23 @@ static int atmci_get_cd(struct mmc_host *mmc) return present; } +static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct atmel_mci_slot *slot = mmc_priv(mmc); + struct atmel_mci *host = slot->host; + + if (enable) + mci_writel(host, IER, slot->sdio_irq); + else + mci_writel(host, IDR, slot->sdio_irq); +} + static const struct mmc_host_ops atmci_ops = { .request = atmci_request, .set_ios = atmci_set_ios, .get_ro = atmci_get_ro, .get_cd = atmci_get_cd, + .enable_sdio_irq = atmci_enable_sdio_irq, }; /* Called with host->lock held */ @@ -1052,8 +1077,11 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) * 
necessary if set_ios() is called when a different slot is * busy transfering data. */ - if (host->need_clock_update) + if (host->need_clock_update) { mci_writel(host, MR, host->mode_reg); + if (atmci_is_mci2()) + mci_writel(host, CFG, host->cfg_reg); + } host->cur_slot->mrq = NULL; host->mrq = NULL; @@ -1483,6 +1511,19 @@ static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) tasklet_schedule(&host->tasklet); } +static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) +{ + int i; + + for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { + struct atmel_mci_slot *slot = host->slot[i]; + if (slot && (status & slot->sdio_irq)) { + mmc_signal_sdio_irq(slot->mmc); + } + } +} + + static irqreturn_t atmci_interrupt(int irq, void *dev_id) { struct atmel_mci *host = dev_id; @@ -1522,6 +1563,10 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) if (pending & MCI_CMDRDY) atmci_cmd_interrupt(host, status); + + if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB)) + atmci_sdio_interrupt(host, status); + } while (pass_count++ < 5); return pass_count ? IRQ_HANDLED : IRQ_NONE; @@ -1544,7 +1589,7 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) static int __init atmci_init_slot(struct atmel_mci *host, struct mci_slot_pdata *slot_data, unsigned int id, - u32 sdc_reg) + u32 sdc_reg, u32 sdio_irq) { struct mmc_host *mmc; struct atmel_mci_slot *slot; @@ -1560,11 +1605,16 @@ static int __init atmci_init_slot(struct atmel_mci *host, slot->wp_pin = slot_data->wp_pin; slot->detect_is_active_high = slot_data->detect_is_active_high; slot->sdc_reg = sdc_reg; + slot->sdio_irq = sdio_irq; mmc->ops = &atmci_ops; mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); mmc->f_max = host->bus_hz / 2; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + if (sdio_irq) + mmc->caps |= MMC_CAP_SDIO_IRQ; + if (atmci_is_mci2()) + mmc->caps |= MMC_CAP_SD_HIGHSPEED; if (slot_data->bus_width >= 4) mmc->caps |= MMC_CAP_4_BIT_DATA; @@ -1753,13 +1803,13 @@ static int __init atmci_probe(struct platform_device *pdev) ret = -ENODEV; if (pdata->slot[0].bus_width) { ret = atmci_init_slot(host, &pdata->slot[0], - 0, MCI_SDCSEL_SLOT_A); + 0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA); if (!ret) nr_slots++; } if (pdata->slot[1].bus_width) { ret = atmci_init_slot(host, &pdata->slot[1], - 1, MCI_SDCSEL_SLOT_B); + 1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB); if (!ret) nr_slots++; } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index f5834449400e..c8da5d30a861 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -1142,7 +1142,7 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) struct au1xmmc_host *host = platform_get_drvdata(pdev); int ret; - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret) return ret; diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 6919e844072c..4b0e677d7295 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -576,7 +576,7 @@ static int sdh_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); peripheral_free_list(drv_data->pin_req); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 92a324f7417c..ca3bdc831900 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -675,7 +675,7 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t 
state) struct mmc_host *mmc = cb710_slot_to_mmc(slot); int err; - err = mmc_suspend_host(mmc, state); + err = mmc_suspend_host(mmc); if (err) return err; diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 3bd0ba294e9d..33d9f1b00862 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -137,15 +137,15 @@ /* * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, - * and we handle up to NR_SG segments. MMC_BLOCK_BOUNCE kicks in only + * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only * for drivers with max_hw_segs == 1, making the segments bigger (64KB) - * than the page or two that's otherwise typical. NR_SG == 16 gives at - * least the same throughput boost, using EDMA transfer linkage instead - * of spending CPU time copying pages. + * than the page or two that's otherwise typical. nr_sg (passed from + * platform data) == 16 gives at least the same throughput boost, using + * EDMA transfer linkage instead of spending CPU time copying pages. */ #define MAX_CCNT ((1 << 16) - 1) -#define NR_SG 16 +#define MAX_NR_SG 16 static unsigned rw_threshold = 32; module_param(rw_threshold, uint, S_IRUGO); @@ -171,6 +171,7 @@ struct mmc_davinci_host { #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; + unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. bytes_left applies @@ -192,7 +193,7 @@ struct mmc_davinci_host { struct edmacc_param tx_template; struct edmacc_param rx_template; unsigned n_link; - u32 links[NR_SG - 1]; + u32 links[MAX_NR_SG - 1]; /* For PIO we walk scatterlists one segment at a time. */ unsigned int sg_len; @@ -202,6 +203,8 @@ struct mmc_davinci_host { u8 version; /* for ns in one cycle calculation */ unsigned ns_in_one_cycle; + /* Number of sg segments */ + u8 nr_sg; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif @@ -568,6 +571,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host) static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) { + u32 link_size; int r, i; /* Acquire master DMA write channel */ @@ -593,7 +597,8 @@ static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) /* Allocate parameter RAM slots, which will later be bound to a * channel as needed to handle a scatterlist. 
*/ - for (i = 0; i < ARRAY_SIZE(host->links); i++) { + link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); + for (i = 0; i < link_size; i++) { r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); if (r < 0) { dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", @@ -905,19 +910,26 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, } } -static void -davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, + int val) { u32 temp; - /* reset command and data state machines */ temp = readl(host->base + DAVINCI_MMCCTL); - writel(temp | MMCCTL_CMDRST | MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); + if (val) /* reset */ + temp |= MMCCTL_CMDRST | MMCCTL_DATRST; + else /* enable */ + temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); - temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); - udelay(10); writel(temp, host->base + DAVINCI_MMCCTL); + udelay(10); +} + +static void +davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) +{ + mmc_davinci_reset_ctrl(host, 1); + mmc_davinci_reset_ctrl(host, 0); } static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) @@ -1121,15 +1133,8 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) #endif static void __init init_mmcsd_host(struct mmc_davinci_host *host) { - /* DAT line portion is diabled and in reset state */ - writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); - - /* CMD line portion is diabled and in reset state */ - writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_CMDRST, - host->base + DAVINCI_MMCCTL); - udelay(10); + mmc_davinci_reset_ctrl(host, 1); writel(0, host->base + DAVINCI_MMCCLK); writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); @@ -1137,12 +1142,7 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host) writel(0x1FFF, host->base + DAVINCI_MMCTOR); writel(0xFFFF, host->base + DAVINCI_MMCTOD); - writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_DATRST, - host->base + DAVINCI_MMCCTL); - writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_CMDRST, - host->base + DAVINCI_MMCCTL); - - udelay(10); + mmc_davinci_reset_ctrl(host, 0); } static int __init davinci_mmcsd_probe(struct platform_device *pdev) @@ -1202,6 +1202,12 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) init_mmcsd_host(host); + if (pdata->nr_sg) + host->nr_sg = pdata->nr_sg - 1; + + if (host->nr_sg > MAX_NR_SG || !host->nr_sg) + host->nr_sg = MAX_NR_SG; + host->use_dma = use_dma; host->irq = irq; @@ -1327,32 +1333,65 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int davinci_mmcsd_suspend(struct platform_device *pdev, pm_message_t msg) +static int davinci_mmcsd_suspend(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); + int ret; - return mmc_suspend_host(host->mmc, msg); + mmc_host_enable(host->mmc); + ret = mmc_suspend_host(host->mmc); + if (!ret) { + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + mmc_host_disable(host->mmc); + clk_disable(host->clk); + host->suspended = 1; + } else { + host->suspended = 0; + mmc_host_disable(host->mmc); + } + + return ret; } -static int davinci_mmcsd_resume(struct platform_device *pdev) +static int davinci_mmcsd_resume(struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = 
platform_get_drvdata(pdev); + int ret; + + if (!host->suspended) + return 0; - return mmc_resume_host(host->mmc); + clk_enable(host->clk); + mmc_host_enable(host->mmc); + + mmc_davinci_reset_ctrl(host, 0); + ret = mmc_resume_host(host->mmc); + if (!ret) + host->suspended = 0; + + return ret; } + +static const struct dev_pm_ops davinci_mmcsd_pm = { + .suspend = davinci_mmcsd_suspend, + .resume = davinci_mmcsd_resume, +}; + +#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) #else -#define davinci_mmcsd_suspend NULL -#define davinci_mmcsd_resume NULL +#define davinci_mmcsd_pm_ops NULL #endif static struct platform_driver davinci_mmcsd_driver = { .driver = { .name = "davinci_mmc", .owner = THIS_MODULE, + .pm = davinci_mmcsd_pm_ops, }, .remove = __exit_p(davinci_mmcsd_remove), - .suspend = davinci_mmcsd_suspend, - .resume = davinci_mmcsd_resume, }; static int __init davinci_mmcsd_init(void) diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index bf98d7cc928a..9a68ff4353a2 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -1115,7 +1115,7 @@ static int imxmci_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index ff115d920888..4917af96bae1 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -824,7 +824,7 @@ static int mmci_suspend(struct amba_device *dev, pm_message_t state) if (mmc) { struct mmci_host *host = mmc_priv(mmc); - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); if (ret == 0) writel(0, host->base + MMCIMASK0); } diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 61f1d27fed3f..24e09454e522 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -1327,7 +1327,7 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) disable_irq(host->stat_irq); if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc, state); + rc = mmc_suspend_host(mmc); if (!rc) msmsdcc_writel(host, 0, MMCIMASK0); if (host->clks_on) diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 34e23489811a..366eefa77c5a 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -865,7 +865,7 @@ static int mvsd_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index ec18e3b60342..d9d4a72e0ec7 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -932,7 +932,7 @@ static int mxcmci_suspend(struct platform_device *dev, pm_message_t state) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 84d280406341..2b281680e320 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -39,30 +39,30 @@ #include #define OMAP_MMC_REG_CMD 0x00 -#define OMAP_MMC_REG_ARGL 0x04 -#define OMAP_MMC_REG_ARGH 0x08 -#define OMAP_MMC_REG_CON 0x0c -#define OMAP_MMC_REG_STAT 0x10 -#define OMAP_MMC_REG_IE 0x14 -#define OMAP_MMC_REG_CTO 0x18 -#define OMAP_MMC_REG_DTO 0x1c -#define OMAP_MMC_REG_DATA 0x20 -#define OMAP_MMC_REG_BLEN 0x24 -#define OMAP_MMC_REG_NBLK 0x28 -#define OMAP_MMC_REG_BUF 0x2c -#define OMAP_MMC_REG_SDIO 0x34 -#define OMAP_MMC_REG_REV 0x3c -#define 
OMAP_MMC_REG_RSP0 0x40 -#define OMAP_MMC_REG_RSP1 0x44 -#define OMAP_MMC_REG_RSP2 0x48 -#define OMAP_MMC_REG_RSP3 0x4c -#define OMAP_MMC_REG_RSP4 0x50 -#define OMAP_MMC_REG_RSP5 0x54 -#define OMAP_MMC_REG_RSP6 0x58 -#define OMAP_MMC_REG_RSP7 0x5c -#define OMAP_MMC_REG_IOSR 0x60 -#define OMAP_MMC_REG_SYSC 0x64 -#define OMAP_MMC_REG_SYSS 0x68 +#define OMAP_MMC_REG_ARGL 0x01 +#define OMAP_MMC_REG_ARGH 0x02 +#define OMAP_MMC_REG_CON 0x03 +#define OMAP_MMC_REG_STAT 0x04 +#define OMAP_MMC_REG_IE 0x05 +#define OMAP_MMC_REG_CTO 0x06 +#define OMAP_MMC_REG_DTO 0x07 +#define OMAP_MMC_REG_DATA 0x08 +#define OMAP_MMC_REG_BLEN 0x09 +#define OMAP_MMC_REG_NBLK 0x0a +#define OMAP_MMC_REG_BUF 0x0b +#define OMAP_MMC_REG_SDIO 0x0d +#define OMAP_MMC_REG_REV 0x0f +#define OMAP_MMC_REG_RSP0 0x10 +#define OMAP_MMC_REG_RSP1 0x11 +#define OMAP_MMC_REG_RSP2 0x12 +#define OMAP_MMC_REG_RSP3 0x13 +#define OMAP_MMC_REG_RSP4 0x14 +#define OMAP_MMC_REG_RSP5 0x15 +#define OMAP_MMC_REG_RSP6 0x16 +#define OMAP_MMC_REG_RSP7 0x17 +#define OMAP_MMC_REG_IOSR 0x18 +#define OMAP_MMC_REG_SYSC 0x19 +#define OMAP_MMC_REG_SYSS 0x1a #define OMAP_MMC_STAT_CARD_ERR (1 << 14) #define OMAP_MMC_STAT_CARD_IRQ (1 << 13) @@ -78,8 +78,9 @@ #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) -#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG_##reg) -#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG_##reg) +#define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) +#define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) +#define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) /* * Command types @@ -133,6 +134,7 @@ struct mmc_omap_host { int irq; unsigned char bus_mode; unsigned char hw_bus_mode; + unsigned int reg_shift; struct work_struct cmd_abort_work; unsigned abort:1; @@ -680,9 +682,9 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG_DATA, host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); } } @@ -900,7 +902,7 @@ mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) int dst_port = 0; int sync_dev = 0; - data_addr = host->phys_base + OMAP_MMC_REG_DATA; + data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); frame = data->blksz; count = sg_dma_len(sg); @@ -1493,6 +1495,8 @@ static int __init mmc_omap_probe(struct platform_device *pdev) } } + host->reg_shift = (cpu_is_omap7xx() ? 
1 : 2); + return 0; err_plat_cleanup: @@ -1557,7 +1561,7 @@ static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) struct mmc_omap_slot *slot; slot = host->slots[i]; - ret = mmc_suspend_host(slot->mmc, mesg); + ret = mmc_suspend_host(slot->mmc); if (ret < 0) { while (--i >= 0) { slot = host->slots[i]; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index e9caf694c59e..b032828c6126 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -157,12 +157,10 @@ struct omap_hsmmc_host { */ struct regulator *vcc; struct regulator *vcc_aux; - struct semaphore sem; struct work_struct mmc_carddetect_work; void __iomem *base; resource_size_t mapbase; spinlock_t irq_lock; /* Prevent races with irq handler */ - unsigned long flags; unsigned int id; unsigned int dma_len; unsigned int dma_sg_idx; @@ -183,6 +181,7 @@ struct omap_hsmmc_host { int protect_card; int reqs_blocked; int use_reg; + int req_in_progress; struct omap_mmc_platform_data *pdata; }; @@ -524,6 +523,27 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); } +static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host) +{ + unsigned int irq_mask; + + if (host->use_dma) + irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE); + else + irq_mask = INT_EN_MASK; + + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); + OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); + OMAP_HSMMC_WRITE(host->base, IE, irq_mask); +} + +static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host) +{ + OMAP_HSMMC_WRITE(host->base, ISE, 0); + OMAP_HSMMC_WRITE(host->base, IE, 0); + OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); +} + #ifdef CONFIG_PM /* @@ -592,9 +612,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) && time_before(jiffies, timeout)) ; - OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_disable_irq(host); /* Do not initialize card-specific things if the power is off */ if (host->power_mode == MMC_POWER_OFF) @@ -697,6 +715,8 @@ static void send_init_stream(struct omap_hsmmc_host *host) return; disable_irq(host->irq); + + OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); OMAP_HSMMC_WRITE(host->base, CON, OMAP_HSMMC_READ(host->base, CON) | INIT_STREAM); OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); @@ -762,17 +782,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, mmc_hostname(host->mmc), cmd->opcode, cmd->arg); host->cmd = cmd; - /* - * Clear status bits and enable interrupts - */ - OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - - if (host->use_dma) - OMAP_HSMMC_WRITE(host->base, IE, - INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE)); - else - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_enable_irq(host); host->response_busy = 0; if (cmd->flags & MMC_RSP_PRESENT) { @@ -806,13 +816,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, if (host->use_dma) cmdreg |= DMA_EN; - /* - * In an interrupt context (i.e. STOP command), the spinlock is unlocked - * by the interrupt handler, otherwise (i.e. for a new request) it is - * unlocked here. 
- */ - if (!in_interrupt()) - spin_unlock_irqrestore(&host->irq_lock, host->flags); + host->req_in_progress = 1; OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg); OMAP_HSMMC_WRITE(host->base, CMD, cmdreg); @@ -827,6 +831,23 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) +{ + int dma_ch; + + spin_lock(&host->irq_lock); + host->req_in_progress = 0; + dma_ch = host->dma_ch; + spin_unlock(&host->irq_lock); + + omap_hsmmc_disable_irq(host); + /* Do not complete the request if DMA is still in progress */ + if (mrq->data && host->use_dma && dma_ch != -1) + return; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); +} + /* * Notify the transfer complete to MMC core */ @@ -843,25 +864,19 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) return; } - host->mrq = NULL; - mmc_request_done(host->mmc, mrq); + omap_hsmmc_request_done(host, mrq); return; } host->data = NULL; - if (host->use_dma && host->dma_ch != -1) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, - omap_hsmmc_get_dma_dir(host, data)); - if (!data->error) data->bytes_xfered += data->blocks * (data->blksz); else data->bytes_xfered = 0; if (!data->stop) { - host->mrq = NULL; - mmc_request_done(host->mmc, data->mrq); + omap_hsmmc_request_done(host, data->mrq); return; } omap_hsmmc_start_command(host, data->stop, NULL); @@ -887,10 +902,8 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) cmd->resp[0] = OMAP_HSMMC_READ(host->base, RSP10); } } - if ((host->data == NULL && !host->response_busy) || cmd->error) { - host->mrq = NULL; - mmc_request_done(host->mmc, cmd->mrq); - } + if ((host->data == NULL && !host->response_busy) || cmd->error) + omap_hsmmc_request_done(host, cmd->mrq); } /* @@ -898,14 +911,19 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) */ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) { + int dma_ch; + host->data->error = errno; - if (host->use_dma && host->dma_ch != -1) { + spin_lock(&host->irq_lock); + dma_ch = host->dma_ch; + host->dma_ch = -1; + spin_unlock(&host->irq_lock); + + if (host->use_dma && dma_ch != -1) { dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - up(&host->sem); + omap_free_dma(dma_ch); } host->data = NULL; } @@ -967,28 +985,21 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, __func__); } -/* - * MMC controller IRQ handler - */ -static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) +static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) { - struct omap_hsmmc_host *host = dev_id; struct mmc_data *data; - int end_cmd = 0, end_trans = 0, status; - - spin_lock(&host->irq_lock); - - if (host->mrq == NULL) { - OMAP_HSMMC_WRITE(host->base, STAT, - OMAP_HSMMC_READ(host->base, STAT)); - /* Flush posted write */ - OMAP_HSMMC_READ(host->base, STAT); - spin_unlock(&host->irq_lock); - return IRQ_HANDLED; + int end_cmd = 0, end_trans = 0; + + if (!host->req_in_progress) { + do { + OMAP_HSMMC_WRITE(host->base, STAT, status); + /* Flush posted write */ + status = OMAP_HSMMC_READ(host->base, STAT); + } while (status & INT_EN_MASK); + return; } data = host->data; - status = OMAP_HSMMC_READ(host->base, STAT); dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); if (status & ERR) { @@ 
-1041,15 +1052,27 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) } OMAP_HSMMC_WRITE(host->base, STAT, status); - /* Flush posted write */ - OMAP_HSMMC_READ(host->base, STAT); if (end_cmd || ((status & CC) && host->cmd)) omap_hsmmc_cmd_done(host, host->cmd); if ((end_trans || (status & TC)) && host->mrq) omap_hsmmc_xfer_done(host, data); +} - spin_unlock(&host->irq_lock); +/* + * MMC controller IRQ handler + */ +static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) +{ + struct omap_hsmmc_host *host = dev_id; + int status; + + status = OMAP_HSMMC_READ(host->base, STAT); + do { + omap_hsmmc_do_irq(host, status); + /* Flush posted write */ + status = OMAP_HSMMC_READ(host->base, STAT); + } while (status & INT_EN_MASK); return IRQ_HANDLED; } @@ -1244,31 +1267,47 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, /* * DMA call back function */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) +static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) { - struct omap_hsmmc_host *host = data; + struct omap_hsmmc_host *host = cb_data; + struct mmc_data *data = host->mrq->data; + int dma_ch, req_in_progress; if (ch_status & OMAP2_DMA_MISALIGNED_ERR_IRQ) dev_dbg(mmc_dev(host->mmc), "MISALIGNED_ADRS_ERR\n"); - if (host->dma_ch < 0) + spin_lock(&host->irq_lock); + if (host->dma_ch < 0) { + spin_unlock(&host->irq_lock); return; + } host->dma_sg_idx++; if (host->dma_sg_idx < host->dma_len) { /* Fire up the next transfer. */ - omap_hsmmc_config_dma_params(host, host->data, - host->data->sg + host->dma_sg_idx); + omap_hsmmc_config_dma_params(host, data, + data->sg + host->dma_sg_idx); + spin_unlock(&host->irq_lock); return; } - omap_free_dma(host->dma_ch); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, + omap_hsmmc_get_dma_dir(host, data)); + + req_in_progress = host->req_in_progress; + dma_ch = host->dma_ch; host->dma_ch = -1; - /* - * DMA Callback: run in interrupt context. - * mutex_unlock will throw a kernel warning if used. - */ - up(&host->sem); + spin_unlock(&host->irq_lock); + + omap_free_dma(dma_ch); + + /* If DMA has finished after TC, complete the request */ + if (!req_in_progress) { + struct mmc_request *mrq = host->mrq; + + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); + } } /* @@ -1277,7 +1316,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *data) static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, err = 1, i; + int dma_ch = 0, ret = 0, i; struct mmc_data *data = req->data; /* Sanity check: all the SG entries must be aligned by block size. 
*/ @@ -1294,23 +1333,7 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, */ return -EINVAL; - /* - * If for some reason the DMA transfer is still active, - * we wait for timeout period and free the dma - */ - if (host->dma_ch != -1) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(100); - if (down_trylock(&host->sem)) { - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - up(&host->sem); - return err; - } - } else { - if (down_trylock(&host->sem)) - return err; - } + BUG_ON(host->dma_ch != -1); ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); @@ -1410,37 +1433,27 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) struct omap_hsmmc_host *host = mmc_priv(mmc); int err; - /* - * Prevent races with the interrupt handler because of unexpected - * interrupts, but not if we are already in interrupt context i.e. - * retries. - */ - if (!in_interrupt()) { - spin_lock_irqsave(&host->irq_lock, host->flags); - /* - * Protect the card from I/O if there is a possibility - * it can be removed. - */ - if (host->protect_card) { - if (host->reqs_blocked < 3) { - /* - * Ensure the controller is left in a consistent - * state by resetting the command and data state - * machines. - */ - omap_hsmmc_reset_controller_fsm(host, SRD); - omap_hsmmc_reset_controller_fsm(host, SRC); - host->reqs_blocked += 1; - } - req->cmd->error = -EBADF; - if (req->data) - req->data->error = -EBADF; - spin_unlock_irqrestore(&host->irq_lock, host->flags); - mmc_request_done(mmc, req); - return; - } else if (host->reqs_blocked) - host->reqs_blocked = 0; - } + BUG_ON(host->req_in_progress); + BUG_ON(host->dma_ch != -1); + if (host->protect_card) { + if (host->reqs_blocked < 3) { + /* + * Ensure the controller is left in a consistent + * state by resetting the command and data state + * machines. 
+ */ + omap_hsmmc_reset_controller_fsm(host, SRD); + omap_hsmmc_reset_controller_fsm(host, SRC); + host->reqs_blocked += 1; + } + req->cmd->error = -EBADF; + if (req->data) + req->data->error = -EBADF; + req->cmd->retries = 0; + mmc_request_done(mmc, req); + return; + } else if (host->reqs_blocked) + host->reqs_blocked = 0; WARN_ON(host->mrq != NULL); host->mrq = req; err = omap_hsmmc_prepare_data(host, req); @@ -1449,8 +1462,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) if (req->data) req->data->error = err; host->mrq = NULL; - if (!in_interrupt()) - spin_unlock_irqrestore(&host->irq_lock, host->flags); mmc_request_done(mmc, req); return; } @@ -2019,7 +2030,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) mmc->f_min = 400000; mmc->f_max = 52000000; - sema_init(&host->sem, 1); spin_lock_init(&host->irq_lock); host->iclk = clk_get(&pdev->dev, "ick"); @@ -2162,8 +2172,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) } } - OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK); - OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK); + omap_hsmmc_disable_irq(host); mmc_host_lazy_disable(host->mmc); @@ -2258,10 +2267,12 @@ static int omap_hsmmc_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) +static int omap_hsmmc_suspend(struct device *dev) { int ret = 0; + struct platform_device *pdev = to_platform_device(dev); struct omap_hsmmc_host *host = platform_get_drvdata(pdev); + pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */ if (host && host->suspended) return 0; @@ -2281,12 +2292,9 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) } cancel_work_sync(&host->mmc_carddetect_work); mmc_host_enable(host->mmc); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret == 0) { - OMAP_HSMMC_WRITE(host->base, ISE, 0); - OMAP_HSMMC_WRITE(host->base, IE, 0); - - + omap_hsmmc_disable_irq(host); OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); mmc_host_disable(host->mmc); @@ -2310,9 +2318,10 @@ static int omap_hsmmc_suspend(struct platform_device *pdev, pm_message_t state) } /* Routine to resume the MMC device */ -static int omap_hsmmc_resume(struct platform_device *pdev) +static int omap_hsmmc_resume(struct device *dev) { int ret = 0; + struct platform_device *pdev = to_platform_device(dev); struct omap_hsmmc_host *host = platform_get_drvdata(pdev); if (host && !host->suspended) @@ -2363,13 +2372,17 @@ clk_en_err: #define omap_hsmmc_resume NULL #endif -static struct platform_driver omap_hsmmc_driver = { - .remove = omap_hsmmc_remove, +static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { .suspend = omap_hsmmc_suspend, .resume = omap_hsmmc_resume, +}; + +static struct platform_driver omap_hsmmc_driver = { + .remove = omap_hsmmc_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .pm = &omap_hsmmc_dev_pm_ops, }, }; diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index e4f00e70a749..0a4e43f37140 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -813,7 +813,7 @@ static int pxamci_suspend(struct device *dev) int ret = 0; if (mmc) - ret = mmc_suspend_host(mmc, PMSG_SUSPEND); + ret = mmc_suspend_host(mmc); return ret; } diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 2fdf7689ae6c..2e16e0a90a5e 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1881,9 +1881,8 @@ 
MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); static int s3cmci_suspend(struct device *dev) { struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - struct pm_message event = { PM_EVENT_SUSPEND }; - return mmc_suspend_host(mmc, event); + return mmc_suspend_host(mmc); } static int s3cmci_resume(struct device *dev) diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index 7802a543d8fc..a2e9820cd42f 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -89,7 +89,7 @@ static int sdhci_of_suspend(struct of_device *ofdev, pm_message_t state) { struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); - return mmc_suspend_host(host->mmc, state); + return mmc_suspend_host(host->mmc); } static int sdhci_of_resume(struct of_device *ofdev) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d5b11a17e648..c8623de13af3 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -129,12 +129,12 @@ struct sdhci_of_data sdhci_esdhc = { SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | SDHCI_QUIRK_NO_CARD_NO_RESET, .ops = { - .readl = sdhci_be32bs_readl, - .readw = esdhc_readw, - .readb = sdhci_be32bs_readb, - .writel = sdhci_be32bs_writel, - .writew = esdhc_writew, - .writeb = esdhc_writeb, + .read_l = sdhci_be32bs_readl, + .read_w = esdhc_readw, + .read_b = sdhci_be32bs_readb, + .write_l = sdhci_be32bs_writel, + .write_w = esdhc_writew, + .write_b = esdhc_writeb, .set_clock = esdhc_set_clock, .enable_dma = esdhc_enable_dma, .get_max_clock = esdhc_get_max_clock, diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 35117f3ed757..68ddb7546ae2 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -55,11 +55,11 @@ struct sdhci_of_data sdhci_hlwd = { .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE, .ops = { - .readl = sdhci_be32bs_readl, - .readw = sdhci_be32bs_readw, - .readb = sdhci_be32bs_readb, - .writel = sdhci_hlwd_writel, - .writew = sdhci_hlwd_writew, - .writeb = sdhci_hlwd_writeb, + .read_l = sdhci_be32bs_readl, + .read_w = sdhci_be32bs_readw, + .read_b = sdhci_be32bs_readb, + .write_l = sdhci_hlwd_writel, + .write_w = sdhci_hlwd_writew, + .write_b = sdhci_hlwd_writeb, }, }; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 6701af629c30..65483fdea45b 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -628,7 +628,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pci_slot)); if (IS_ERR(host)) { dev_err(&pdev->dev, "cannot allocate host\n"); - return ERR_PTR(PTR_ERR(host)); + return ERR_CAST(host); } slot = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 297f40ae6ad5..b6ee0d719698 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -29,6 +29,7 @@ #include #include +#include #include "sdhci.h" @@ -49,19 +50,18 @@ static struct sdhci_ops sdhci_pltfm_ops = { static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) { + struct sdhci_pltfm_data *pdata = pdev->dev.platform_data; struct sdhci_host *host; struct resource *iomem; int ret; - BUG_ON(pdev == NULL); - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { ret = -ENOMEM; goto err; } - if (resource_size(iomem) != 0x100) + if (resource_size(iomem) < 0x100) dev_err(&pdev->dev, "Invalid iomem 
size. You may " "experience problems.\n"); @@ -76,7 +76,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) } host->hw_name = "platform"; - host->ops = &sdhci_pltfm_ops; + if (pdata && pdata->ops) + host->ops = pdata->ops; + else + host->ops = &sdhci_pltfm_ops; + if (pdata) + host->quirks = pdata->quirks; host->irq = platform_get_irq(pdev, 0); if (!request_mem_region(iomem->start, resource_size(iomem), @@ -93,6 +98,12 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) goto err_remap; } + if (pdata && pdata->init) { + ret = pdata->init(host); + if (ret) + goto err_plat_init; + } + ret = sdhci_add_host(host); if (ret) goto err_add_host; @@ -102,6 +113,9 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) return 0; err_add_host: + if (pdata && pdata->exit) + pdata->exit(host); +err_plat_init: iounmap(host->ioaddr); err_remap: release_mem_region(iomem->start, resource_size(iomem)); @@ -114,6 +128,7 @@ err: static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) { + struct sdhci_pltfm_data *pdata = pdev->dev.platform_data; struct sdhci_host *host = platform_get_drvdata(pdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int dead; @@ -125,6 +140,8 @@ static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) dead = 1; sdhci_remove_host(host, dead); + if (pdata && pdata->exit) + pdata->exit(host); iounmap(host->ioaddr); release_mem_region(iomem->start, resource_size(iomem)); sdhci_free_host(host); @@ -165,4 +182,3 @@ MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver"); MODULE_AUTHOR("Mocean Laboratories "); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:sdhci"); - diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 2136794c0cfa..af217924a76e 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -317,12 +317,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) host->irq = irq; /* Setup quirks for the controller */ - - /* Currently with ADMA enabled we are getting some length - * interrupts that are not being dealt with, do disable - * ADMA until this is sorted out. */ - host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE; + host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; #ifndef CONFIG_MMC_SDHCI_S3C_DMA @@ -330,9 +325,6 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) * support as well. */ host->quirks |= SDHCI_QUIRK_BROKEN_DMA; - /* PIO currently has problems with multi-block IO */ - host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; - #endif /* CONFIG_MMC_SDHCI_S3C_DMA */ /* It seems we do not get an DATA transfer complete on non-busy diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c new file mode 100644 index 000000000000..d70c54c7b70a --- /dev/null +++ b/drivers/mmc/host/sdhci-spear.c @@ -0,0 +1,298 @@ +/* + * drivers/mmc/host/sdhci-spear.c + * + * Support of SDHCI platform devices for spear soc family + * + * Copyright (C) 2010 ST Microelectronics + * Viresh Kumar + * + * Inspired by sdhci-pltfm.c + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdhci.h" + +struct spear_sdhci { + struct clk *clk; + struct sdhci_plat_data *data; +}; + +/* sdhci ops */ +static struct sdhci_ops sdhci_pltfm_ops = { + /* Nothing to do for now. */ +}; + +/* gpio card detection interrupt handler */ +static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct sdhci_host *host = platform_get_drvdata(pdev); + struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); + unsigned long gpio_irq_type; + int val; + + val = gpio_get_value(sdhci->data->card_int_gpio); + + /* val == 1 -> card removed, val == 0 -> card inserted */ + /* if card removed - set irq for low level, else vice versa */ + gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; + set_irq_type(irq, gpio_irq_type); + + if (sdhci->data->card_power_gpio >= 0) { + if (!sdhci->data->power_always_enb) { + /* if card inserted, give power, otherwise remove it */ + val = sdhci->data->power_active_high ? !val : val ; + gpio_set_value(sdhci->data->card_power_gpio, val); + } + } + + /* inform sdhci driver about card insertion/removal */ + tasklet_schedule(&host->card_tasklet); + + return IRQ_HANDLED; +} + +static int __devinit sdhci_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct resource *iomem; + struct spear_sdhci *sdhci; + int ret; + + BUG_ON(pdev == NULL); + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "memory resource not defined\n"); + goto err; + } + + if (!request_mem_region(iomem->start, resource_size(iomem), + "spear-sdhci")) { + ret = -EBUSY; + dev_dbg(&pdev->dev, "cannot request region\n"); + goto err; + } + + sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); + if (!sdhci) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); + goto err_kzalloc; + } + + /* clk enable */ + sdhci->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(sdhci->clk)) { + ret = PTR_ERR(sdhci->clk); + dev_dbg(&pdev->dev, "Error getting clock\n"); + goto err_clk_get; + } + + ret = clk_enable(sdhci->clk); + if (ret) { + dev_dbg(&pdev->dev, "Error enabling clock\n"); + goto err_clk_enb; + } + + /* overwrite platform_data */ + sdhci->data = dev_get_platdata(&pdev->dev); + pdev->dev.platform_data = sdhci; + + if (pdev->dev.parent) + host = sdhci_alloc_host(pdev->dev.parent, 0); + else + host = sdhci_alloc_host(&pdev->dev, 0); + + if (IS_ERR(host)) { + ret = PTR_ERR(host); + dev_dbg(&pdev->dev, "error allocating host\n"); + goto err_alloc_host; + } + + host->hw_name = "sdhci"; + host->ops = &sdhci_pltfm_ops; + host->irq = platform_get_irq(pdev, 0); + host->quirks = SDHCI_QUIRK_BROKEN_ADMA; + + host->ioaddr = ioremap(iomem->start, resource_size(iomem)); + if (!host->ioaddr) { + ret = -ENOMEM; + dev_dbg(&pdev->dev, "failed to remap registers\n"); + goto err_ioremap; + } + + ret = sdhci_add_host(host); + if (ret) { + dev_dbg(&pdev->dev, "error adding host\n"); + goto err_add_host; + } + + platform_set_drvdata(pdev, host); + + /* + * It is optional to use GPIOs for sdhci Power control & sdhci card + * interrupt detection. If sdhci->data is NULL, then use original sdhci + * lines otherwise GPIO lines. 
+ * If GPIO is selected for power control, then power should be disabled + * after card removal and should be enabled when card insertion + * interrupt occurs + */ + if (!sdhci->data) + return 0; + + if (sdhci->data->card_power_gpio >= 0) { + int val = 0; + + ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); + if (ret < 0) { + dev_dbg(&pdev->dev, "gpio request fail: %d\n", + sdhci->data->card_power_gpio); + goto err_pgpio_request; + } + + if (sdhci->data->power_always_enb) + val = sdhci->data->power_active_high; + else + val = !sdhci->data->power_active_high; + + ret = gpio_direction_output(sdhci->data->card_power_gpio, val); + if (ret) { + dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", + sdhci->data->card_power_gpio); + goto err_pgpio_direction; + } + + gpio_set_value(sdhci->data->card_power_gpio, 1); + } + + if (sdhci->data->card_int_gpio >= 0) { + ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); + if (ret < 0) { + dev_dbg(&pdev->dev, "gpio request fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_request; + } + + ret = gpio_direction_input(sdhci->data->card_int_gpio); + if (ret) { + dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_direction; + } + ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), + sdhci_gpio_irq, IRQF_TRIGGER_LOW, + mmc_hostname(host->mmc), pdev); + if (ret) { + dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", + sdhci->data->card_int_gpio); + goto err_igpio_request_irq; + } + + } + + return 0; + +err_igpio_request_irq: +err_igpio_direction: + if (sdhci->data->card_int_gpio >= 0) + gpio_free(sdhci->data->card_int_gpio); +err_igpio_request: +err_pgpio_direction: + if (sdhci->data->card_power_gpio >= 0) + gpio_free(sdhci->data->card_power_gpio); +err_pgpio_request: + platform_set_drvdata(pdev, NULL); + sdhci_remove_host(host, 1); +err_add_host: + iounmap(host->ioaddr); +err_ioremap: + sdhci_free_host(host); +err_alloc_host: + clk_disable(sdhci->clk); +err_clk_enb: + clk_put(sdhci->clk); +err_clk_get: + kfree(sdhci); +err_kzalloc: + release_mem_region(iomem->start, resource_size(iomem)); +err: + dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); + return ret; +} + +static int __devexit sdhci_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); + int dead; + u32 scratch; + + if (sdhci->data) { + if (sdhci->data->card_int_gpio >= 0) { + free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev); + gpio_free(sdhci->data->card_int_gpio); + } + + if (sdhci->data->card_power_gpio >= 0) + gpio_free(sdhci->data->card_power_gpio); + } + + platform_set_drvdata(pdev, NULL); + dead = 0; + scratch = readl(host->ioaddr + SDHCI_INT_STATUS); + if (scratch == (u32)-1) + dead = 1; + + sdhci_remove_host(host, dead); + iounmap(host->ioaddr); + sdhci_free_host(host); + clk_disable(sdhci->clk); + clk_put(sdhci->clk); + kfree(sdhci); + if (iomem) + release_mem_region(iomem->start, resource_size(iomem)); + + return 0; +} + +static struct platform_driver sdhci_driver = { + .driver = { + .name = "sdhci", + .owner = THIS_MODULE, + }, + .probe = sdhci_probe, + .remove = __devexit_p(sdhci_remove), +}; + +static int __init sdhci_init(void) +{ + return platform_driver_register(&sdhci_driver); +} +module_init(sdhci_init); + +static void __exit sdhci_exit(void) +{ + platform_driver_unregister(&sdhci_driver); +} 
+module_exit(sdhci_exit); + +MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); +MODULE_AUTHOR("Viresh Kumar "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 9d4fdfa685e5..c6d1bd8d4ac4 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -496,12 +496,22 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); } - /* - * Add a terminating entry. - */ + if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { + /* + * Mark the last descriptor as the terminating descriptor + */ + if (desc != host->adma_desc) { + desc -= 8; + desc[0] |= 0x2; /* end */ + } + } else { + /* + * Add a terminating entry. + */ - /* nop, end, valid */ - sdhci_set_adma_desc(desc, 0, 0, 0x3); + /* nop, end, valid */ + sdhci_set_adma_desc(desc, 0, 0, 0x3); + } /* * Resync align buffer as we might have changed it. @@ -1587,7 +1597,7 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) sdhci_disable_card_detection(host); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); if (ret) return ret; @@ -1744,7 +1754,8 @@ int sdhci_add_host(struct sdhci_host *host) host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; host->max_clk *= 1000000; - if (host->max_clk == 0) { + if (host->max_clk == 0 || host->quirks & + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { if (!host->ops->get_max_clock) { printk(KERN_ERR "%s: Hardware doesn't specify base clock " diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 842f46f94284..c8468134adc9 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -127,7 +127,7 @@ #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ - SDHCI_INT_DATA_END_BIT | SDHCI_ADMA_ERROR) + SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR) #define SDHCI_INT_ALL_MASK ((unsigned int)-1) #define SDHCI_ACMD12_ERR 0x3C @@ -236,6 +236,10 @@ struct sdhci_host { #define SDHCI_QUIRK_DELAY_AFTER_POWER (1<<23) /* Controller uses SDCLK instead of TMCLK for data timeouts */ #define SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK (1<<24) +/* Controller reports wrong base clock capability */ +#define SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN (1<<25) +/* Controller cannot support End Attribute in NOP ADMA descriptor */ +#define SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC (1<<26) int irq; /* Device IRQ */ void __iomem * ioaddr; /* Mapped address */ @@ -294,12 +298,12 @@ struct sdhci_host { struct sdhci_ops { #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS - u32 (*readl)(struct sdhci_host *host, int reg); - u16 (*readw)(struct sdhci_host *host, int reg); - u8 (*readb)(struct sdhci_host *host, int reg); - void (*writel)(struct sdhci_host *host, u32 val, int reg); - void (*writew)(struct sdhci_host *host, u16 val, int reg); - void (*writeb)(struct sdhci_host *host, u8 val, int reg); + u32 (*read_l)(struct sdhci_host *host, int reg); + u16 (*read_w)(struct sdhci_host *host, int reg); + u8 (*read_b)(struct sdhci_host *host, int reg); + void (*write_l)(struct sdhci_host *host, u32 val, int reg); + void (*write_w)(struct sdhci_host *host, u16 val, int reg); + void (*write_b)(struct sdhci_host *host, u8 val, int reg); #endif void (*set_clock)(struct sdhci_host *host, unsigned int clock); @@ -314,48 +318,48 @@ struct sdhci_ops { static inline void sdhci_writel(struct sdhci_host *host, u32 val, int reg) { - if 
(unlikely(host->ops->writel)) - host->ops->writel(host, val, reg); + if (unlikely(host->ops->write_l)) + host->ops->write_l(host, val, reg); else writel(val, host->ioaddr + reg); } static inline void sdhci_writew(struct sdhci_host *host, u16 val, int reg) { - if (unlikely(host->ops->writew)) - host->ops->writew(host, val, reg); + if (unlikely(host->ops->write_w)) + host->ops->write_w(host, val, reg); else writew(val, host->ioaddr + reg); } static inline void sdhci_writeb(struct sdhci_host *host, u8 val, int reg) { - if (unlikely(host->ops->writeb)) - host->ops->writeb(host, val, reg); + if (unlikely(host->ops->write_b)) + host->ops->write_b(host, val, reg); else writeb(val, host->ioaddr + reg); } static inline u32 sdhci_readl(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readl)) - return host->ops->readl(host, reg); + if (unlikely(host->ops->read_l)) + return host->ops->read_l(host, reg); else return readl(host->ioaddr + reg); } static inline u16 sdhci_readw(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readw)) - return host->ops->readw(host, reg); + if (unlikely(host->ops->read_w)) + return host->ops->read_w(host, reg); else return readw(host->ioaddr + reg); } static inline u8 sdhci_readb(struct sdhci_host *host, int reg) { - if (unlikely(host->ops->readb)) - return host->ops->readb(host, reg); + if (unlikely(host->ops->read_b)) + return host->ops->read_b(host, reg); else return readb(host->ioaddr + reg); } diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index cb41e9c3ac07..e7507af3856e 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -519,7 +519,7 @@ static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); - mmc_suspend_host(mmc, PMSG_SUSPEND); + mmc_suspend_host(mmc); return 0; } diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c new file mode 100644 index 000000000000..eb97830c0344 --- /dev/null +++ b/drivers/mmc/host/sh_mmcif.c @@ -0,0 +1,965 @@ +/* + * MMCIF eMMC driver. + * + * Copyright (C) 2010 Renesas Solutions Corp. + * Yusuke Goda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * + * TODO + * 1. DMA + * 2. Power management + * 3. 
Handle MMC errors better + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "sh_mmcif" +#define DRIVER_VERSION "2010-04-28" + +#define MMCIF_CE_CMD_SET 0x00000000 +#define MMCIF_CE_ARG 0x00000008 +#define MMCIF_CE_ARG_CMD12 0x0000000C +#define MMCIF_CE_CMD_CTRL 0x00000010 +#define MMCIF_CE_BLOCK_SET 0x00000014 +#define MMCIF_CE_CLK_CTRL 0x00000018 +#define MMCIF_CE_BUF_ACC 0x0000001C +#define MMCIF_CE_RESP3 0x00000020 +#define MMCIF_CE_RESP2 0x00000024 +#define MMCIF_CE_RESP1 0x00000028 +#define MMCIF_CE_RESP0 0x0000002C +#define MMCIF_CE_RESP_CMD12 0x00000030 +#define MMCIF_CE_DATA 0x00000034 +#define MMCIF_CE_INT 0x00000040 +#define MMCIF_CE_INT_MASK 0x00000044 +#define MMCIF_CE_HOST_STS1 0x00000048 +#define MMCIF_CE_HOST_STS2 0x0000004C +#define MMCIF_CE_VERSION 0x0000007C + +/* CE_CMD_SET */ +#define CMD_MASK 0x3f000000 +#define CMD_SET_RTYP_NO ((0 << 23) | (0 << 22)) +#define CMD_SET_RTYP_6B ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */ +#define CMD_SET_RTYP_17B ((1 << 23) | (0 << 22)) /* R2 */ +#define CMD_SET_RBSY (1 << 21) /* R1b */ +#define CMD_SET_CCSEN (1 << 20) +#define CMD_SET_WDAT (1 << 19) /* 1: on data, 0: no data */ +#define CMD_SET_DWEN (1 << 18) /* 1: write, 0: read */ +#define CMD_SET_CMLTE (1 << 17) /* 1: multi block trans, 0: single */ +#define CMD_SET_CMD12EN (1 << 16) /* 1: CMD12 auto issue */ +#define CMD_SET_RIDXC_INDEX ((0 << 15) | (0 << 14)) /* index check */ +#define CMD_SET_RIDXC_BITS ((0 << 15) | (1 << 14)) /* check bits check */ +#define CMD_SET_RIDXC_NO ((1 << 15) | (0 << 14)) /* no check */ +#define CMD_SET_CRC7C ((0 << 13) | (0 << 12)) /* CRC7 check*/ +#define CMD_SET_CRC7C_BITS ((0 << 13) | (1 << 12)) /* check bits check*/ +#define CMD_SET_CRC7C_INTERNAL ((1 << 13) | (0 << 12)) /* internal CRC7 check*/ +#define CMD_SET_CRC16C (1 << 10) /* 0: CRC16 check*/ +#define CMD_SET_CRCSTE (1 << 8) /* 1: not receive CRC status */ +#define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ +#define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ +#define CMD_SET_CCSH (1 << 5) +#define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ +#define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ +#define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ + +/* CE_CMD_CTRL */ +#define CMD_CTRL_BREAK (1 << 0) + +/* CE_BLOCK_SET */ +#define BLOCK_SIZE_MASK 0x0000ffff + +/* CE_CLK_CTRL */ +#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */ +#define CLK_CLEAR ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) +#define CLK_SUP_PCLK ((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) +#define SRSPTO_256 ((1 << 13) | (0 << 12)) /* resp timeout */ +#define SRBSYTO_29 ((1 << 11) | (1 << 10) | \ + (1 << 9) | (1 << 8)) /* resp busy timeout */ +#define SRWDTO_29 ((1 << 7) | (1 << 6) | \ + (1 << 5) | (1 << 4)) /* read/write timeout */ +#define SCCSTO_29 ((1 << 3) | (1 << 2) | \ + (1 << 1) | (1 << 0)) /* ccs timeout */ + +/* CE_BUF_ACC */ +#define BUF_ACC_DMAWEN (1 << 25) +#define BUF_ACC_DMAREN (1 << 24) +#define BUF_ACC_BUSW_32 (0 << 17) +#define BUF_ACC_BUSW_16 (1 << 17) +#define BUF_ACC_ATYP (1 << 16) + +/* CE_INT */ +#define INT_CCSDE (1 << 29) +#define INT_CMD12DRE (1 << 26) +#define INT_CMD12RBE (1 << 25) +#define INT_CMD12CRE (1 << 24) +#define INT_DTRANE (1 << 23) +#define INT_BUFRE (1 << 22) +#define INT_BUFWEN (1 << 21) +#define INT_BUFREN (1 << 20) +#define INT_CCSRCV (1 << 19) +#define INT_RBSYE (1 << 17) +#define INT_CRSPE (1 << 16) +#define INT_CMDVIO (1 << 15) +#define INT_BUFVIO (1 << 14) +#define 
INT_WDATERR (1 << 11) +#define INT_RDATERR (1 << 10) +#define INT_RIDXERR (1 << 9) +#define INT_RSPERR (1 << 8) +#define INT_CCSTO (1 << 5) +#define INT_CRCSTO (1 << 4) +#define INT_WDATTO (1 << 3) +#define INT_RDATTO (1 << 2) +#define INT_RBSYTO (1 << 1) +#define INT_RSPTO (1 << 0) +#define INT_ERR_STS (INT_CMDVIO | INT_BUFVIO | INT_WDATERR | \ + INT_RDATERR | INT_RIDXERR | INT_RSPERR | \ + INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ + INT_RDATTO | INT_RBSYTO | INT_RSPTO) + +/* CE_INT_MASK */ +#define MASK_ALL 0x00000000 +#define MASK_MCCSDE (1 << 29) +#define MASK_MCMD12DRE (1 << 26) +#define MASK_MCMD12RBE (1 << 25) +#define MASK_MCMD12CRE (1 << 24) +#define MASK_MDTRANE (1 << 23) +#define MASK_MBUFRE (1 << 22) +#define MASK_MBUFWEN (1 << 21) +#define MASK_MBUFREN (1 << 20) +#define MASK_MCCSRCV (1 << 19) +#define MASK_MRBSYE (1 << 17) +#define MASK_MCRSPE (1 << 16) +#define MASK_MCMDVIO (1 << 15) +#define MASK_MBUFVIO (1 << 14) +#define MASK_MWDATERR (1 << 11) +#define MASK_MRDATERR (1 << 10) +#define MASK_MRIDXERR (1 << 9) +#define MASK_MRSPERR (1 << 8) +#define MASK_MCCSTO (1 << 5) +#define MASK_MCRCSTO (1 << 4) +#define MASK_MWDATTO (1 << 3) +#define MASK_MRDATTO (1 << 2) +#define MASK_MRBSYTO (1 << 1) +#define MASK_MRSPTO (1 << 0) + +/* CE_HOST_STS1 */ +#define STS1_CMDSEQ (1 << 31) + +/* CE_HOST_STS2 */ +#define STS2_CRCSTE (1 << 31) +#define STS2_CRC16E (1 << 30) +#define STS2_AC12CRCE (1 << 29) +#define STS2_RSPCRC7E (1 << 28) +#define STS2_CRCSTEBE (1 << 27) +#define STS2_RDATEBE (1 << 26) +#define STS2_AC12REBE (1 << 25) +#define STS2_RSPEBE (1 << 24) +#define STS2_AC12IDXE (1 << 23) +#define STS2_RSPIDXE (1 << 22) +#define STS2_CCSTO (1 << 15) +#define STS2_RDATTO (1 << 14) +#define STS2_DATBSYTO (1 << 13) +#define STS2_CRCSTTO (1 << 12) +#define STS2_AC12BSYTO (1 << 11) +#define STS2_RSPBSYTO (1 << 10) +#define STS2_AC12RSPTO (1 << 9) +#define STS2_RSPTO (1 << 8) +#define STS2_CRC_ERR (STS2_CRCSTE | STS2_CRC16E | \ + STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE) +#define STS2_TIMEOUT_ERR (STS2_CCSTO | STS2_RDATTO | \ + STS2_DATBSYTO | STS2_CRCSTTO | \ + STS2_AC12BSYTO | STS2_RSPBSYTO | \ + STS2_AC12RSPTO | STS2_RSPTO) + +/* CE_VERSION */ +#define SOFT_RST_ON (1 << 31) +#define SOFT_RST_OFF (0 << 31) + +#define CLKDEV_EMMC_DATA 52000000 /* 52MHz */ +#define CLKDEV_MMC_DATA 20000000 /* 20MHz */ +#define CLKDEV_INIT 400000 /* 400 KHz */ + +struct sh_mmcif_host { + struct mmc_host *mmc; + struct mmc_data *data; + struct mmc_command *cmd; + struct platform_device *pd; + struct clk *hclk; + unsigned int clk; + int bus_width; + u16 wait_int; + u16 sd_error; + long timeout; + void __iomem *addr; + wait_queue_head_t intr_wait; +}; + +static inline u32 sh_mmcif_readl(struct sh_mmcif_host *host, unsigned int reg) +{ + return readl(host->addr + reg); +} + +static inline void sh_mmcif_writel(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(val, host->addr + reg); +} + +static inline void sh_mmcif_bitset(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(val | sh_mmcif_readl(host, reg), host->addr + reg); +} + +static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, + unsigned int reg, u32 val) +{ + writel(~val & sh_mmcif_readl(host, reg), host->addr + reg); +} + + +static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) +{ + struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); + sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); + + if (!clk) + return; + 
if (p->sup_pclk && clk == host->clk) + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); + else + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & + (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16)); + + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); +} + +static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) +{ + u32 tmp; + + tmp = 0x010f0000 & sh_mmcif_readl(host, MMCIF_CE_CLK_CTRL); + + sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_ON); + sh_mmcif_writel(host, MMCIF_CE_VERSION, SOFT_RST_OFF); + sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | + SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + /* byte swap on */ + sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); +} + +static int sh_mmcif_error_manage(struct sh_mmcif_host *host) +{ + u32 state1, state2; + int ret, timeout = 10000000; + + host->sd_error = 0; + host->wait_int = 0; + + state1 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS1); + state2 = sh_mmcif_readl(host, MMCIF_CE_HOST_STS2); + pr_debug("%s: ERR HOST_STS1 = %08x\n", \ + DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS1)); + pr_debug("%s: ERR HOST_STS2 = %08x\n", \ + DRIVER_NAME, sh_mmcif_readl(host, MMCIF_CE_HOST_STS2)); + + if (state1 & STS1_CMDSEQ) { + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK); + sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); + while (1) { + timeout--; + if (timeout < 0) { + pr_err(DRIVER_NAME": Forceed end of " \ + "command sequence timeout err\n"); + return -EIO; + } + if (!(sh_mmcif_readl(host, MMCIF_CE_HOST_STS1) + & STS1_CMDSEQ)) + break; + mdelay(1); + } + sh_mmcif_sync_reset(host); + pr_debug(DRIVER_NAME": Forced end of command sequence\n"); + return -EIO; + } + + if (state2 & STS2_CRC_ERR) { + pr_debug(DRIVER_NAME": Happened CRC error\n"); + ret = -EIO; + } else if (state2 & STS2_TIMEOUT_ERR) { + pr_debug(DRIVER_NAME": Happened Timeout error\n"); + ret = -ETIMEDOUT; + } else { + pr_debug(DRIVER_NAME": Happened End/Index error\n"); + ret = -EIO; + } + return ret; +} + +static int sh_mmcif_single_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, *p = sg_virt(data->sg); + + host->wait_int = 0; + + /* buf read enable */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + blocksize = (BLOCK_SIZE_MASK & + sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + for (i = 0; i < blocksize / 4; i++) + *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + + /* buffer read end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + return 0; +} + +static int sh_mmcif_multi_read(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, j, sec, *p; + + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + for (j = 0; j < data->sg_len; j++) { + p = sg_virt(data->sg); + host->wait_int = 0; + for (sec = 0; sec < data->sg->length / blocksize; sec++) { + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + /* buf read enable */ + time = 
wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + + if (host->wait_int != 1 && + (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + for (i = 0; i < blocksize / 4; i++) + *p++ = sh_mmcif_readl(host, MMCIF_CE_DATA); + } + if (j < data->sg_len - 1) + data->sg++; + } + return 0; +} + +static int sh_mmcif_single_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 blocksize, i, *p = sg_virt(data->sg); + + host->wait_int = 0; + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + + /* buf write enable */ + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + blocksize = (BLOCK_SIZE_MASK & + sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET)) + 3; + for (i = 0; i < blocksize / 4; i++) + sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + + /* buffer write end */ + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + return 0; +} + +static int sh_mmcif_multi_write(struct sh_mmcif_host *host, + struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + long time; + u32 i, sec, j, blocksize, *p; + + blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host, MMCIF_CE_BLOCK_SET); + + for (j = 0; j < data->sg_len; j++) { + p = sg_virt(data->sg); + host->wait_int = 0; + for (sec = 0; sec < data->sg->length / blocksize; sec++) { + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + /* buf write enable*/ + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + + if (host->wait_int != 1 && + (time == 0 || host->sd_error != 0)) + return sh_mmcif_error_manage(host); + + host->wait_int = 0; + for (i = 0; i < blocksize / 4; i++) + sh_mmcif_writel(host, MMCIF_CE_DATA, *p++); + } + if (j < data->sg_len - 1) + data->sg++; + } + return 0; +} + +static void sh_mmcif_get_response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + if (cmd->flags & MMC_RSP_136) { + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP3); + cmd->resp[1] = sh_mmcif_readl(host, MMCIF_CE_RESP2); + cmd->resp[2] = sh_mmcif_readl(host, MMCIF_CE_RESP1); + cmd->resp[3] = sh_mmcif_readl(host, MMCIF_CE_RESP0); + } else + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP0); +} + +static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host, + struct mmc_command *cmd) +{ + cmd->resp[0] = sh_mmcif_readl(host, MMCIF_CE_RESP_CMD12); +} + +static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd, u32 opc) +{ + u32 tmp = 0; + + /* Response Type check */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + tmp |= CMD_SET_RTYP_NO; + break; + case MMC_RSP_R1: + case MMC_RSP_R1B: + case MMC_RSP_R3: + tmp |= CMD_SET_RTYP_6B; + break; + case MMC_RSP_R2: + tmp |= CMD_SET_RTYP_17B; + break; + default: + pr_err(DRIVER_NAME": Not support type response.\n"); + break; + } + switch (opc) { + /* RBSY */ + case MMC_SWITCH: + case MMC_STOP_TRANSMISSION: + case MMC_SET_WRITE_PROT: + case MMC_CLR_WRITE_PROT: + case 
MMC_ERASE: + case MMC_GEN_CMD: + tmp |= CMD_SET_RBSY; + break; + } + /* WDAT / DATW */ + if (host->data) { + tmp |= CMD_SET_WDAT; + switch (host->bus_width) { + case MMC_BUS_WIDTH_1: + tmp |= CMD_SET_DATW_1; + break; + case MMC_BUS_WIDTH_4: + tmp |= CMD_SET_DATW_4; + break; + case MMC_BUS_WIDTH_8: + tmp |= CMD_SET_DATW_8; + break; + default: + pr_err(DRIVER_NAME": Not support bus width.\n"); + break; + } + } + /* DWEN */ + if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) + tmp |= CMD_SET_DWEN; + /* CMLTE/CMD12EN */ + if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) { + tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN; + sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, + mrq->data->blocks << 16); + } + /* RIDXC[1:0] check bits */ + if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_RIDXC_BITS; + /* RCRC7C[1:0] check bits */ + if (opc == MMC_SEND_OP_COND) + tmp |= CMD_SET_CRC7C_BITS; + /* RCRC7C[1:0] internal CRC7 */ + if (opc == MMC_ALL_SEND_CID || + opc == MMC_SEND_CSD || opc == MMC_SEND_CID) + tmp |= CMD_SET_CRC7C_INTERNAL; + + return opc = ((opc << 24) | tmp); +} + +static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host, + struct mmc_request *mrq, u32 opc) +{ + u32 ret; + + switch (opc) { + case MMC_READ_MULTIPLE_BLOCK: + ret = sh_mmcif_multi_read(host, mrq); + break; + case MMC_WRITE_MULTIPLE_BLOCK: + ret = sh_mmcif_multi_write(host, mrq); + break; + case MMC_WRITE_BLOCK: + ret = sh_mmcif_single_write(host, mrq); + break; + case MMC_READ_SINGLE_BLOCK: + case MMC_SEND_EXT_CSD: + ret = sh_mmcif_single_read(host, mrq); + break; + default: + pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc); + ret = -EINVAL; + break; + } + return ret; +} + +static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + long time; + int ret = 0, mask = 0; + u32 opc = cmd->opcode; + + host->cmd = cmd; + + switch (opc) { + /* respons busy check */ + case MMC_SWITCH: + case MMC_STOP_TRANSMISSION: + case MMC_SET_WRITE_PROT: + case MMC_CLR_WRITE_PROT: + case MMC_ERASE: + case MMC_GEN_CMD: + mask = MASK_MRBSYE; + break; + default: + mask = MASK_MCRSPE; + break; + } + mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | + MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | + MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | + MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; + + if (host->data) { + sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, 0); + sh_mmcif_writel(host, MMCIF_CE_BLOCK_SET, mrq->data->blksz); + } + opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); + + sh_mmcif_writel(host, MMCIF_CE_INT, 0xD80430C0); + sh_mmcif_writel(host, MMCIF_CE_INT_MASK, mask); + /* set arg */ + sh_mmcif_writel(host, MMCIF_CE_ARG, cmd->arg); + host->wait_int = 0; + /* set cmd */ + sh_mmcif_writel(host, MMCIF_CE_CMD_SET, opc); + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && time == 0) { + cmd->error = sh_mmcif_error_manage(host); + return; + } + if (host->sd_error) { + switch (cmd->opcode) { + case MMC_ALL_SEND_CID: + case MMC_SELECT_CARD: + case MMC_APP_CMD: + cmd->error = -ETIMEDOUT; + break; + default: + pr_debug("%s: Cmd(d'%d) err\n", + DRIVER_NAME, cmd->opcode); + cmd->error = sh_mmcif_error_manage(host); + break; + } + host->sd_error = 0; + host->wait_int = 0; + return; + } + if (!(cmd->flags & MMC_RSP_PRESENT)) { + cmd->error = ret; + host->wait_int = 0; + return; + } + if (host->wait_int == 1) { + 
sh_mmcif_get_response(host, cmd); + host->wait_int = 0; + } + if (host->data) { + ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); + if (ret < 0) + mrq->data->bytes_xfered = 0; + else + mrq->data->bytes_xfered = + mrq->data->blocks * mrq->data->blksz; + } + cmd->error = ret; +} + +static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, + struct mmc_request *mrq, struct mmc_command *cmd) +{ + long time; + + if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); + else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); + else { + pr_err(DRIVER_NAME": not support stop cmd\n"); + cmd->error = sh_mmcif_error_manage(host); + return; + } + + time = wait_event_interruptible_timeout(host->intr_wait, + host->wait_int == 1 || + host->sd_error == 1, host->timeout); + if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) { + cmd->error = sh_mmcif_error_manage(host); + return; + } + sh_mmcif_get_cmd12response(host, cmd); + host->wait_int = 0; + cmd->error = 0; +} + +static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + + switch (mrq->cmd->opcode) { + /* MMCIF does not support SD/SDIO command */ + case SD_IO_SEND_OP_COND: + case MMC_APP_CMD: + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, mrq); + return; + case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ + if (!mrq->data) { + /* send_if_cond cmd (not support) */ + mrq->cmd->error = -ETIMEDOUT; + mmc_request_done(mmc, mrq); + return; + } + break; + default: + break; + } + host->data = mrq->data; + sh_mmcif_start_cmd(host, mrq, mrq->cmd); + host->data = NULL; + + if (mrq->cmd->error != 0) { + mmc_request_done(mmc, mrq); + return; + } + if (mrq->stop) + sh_mmcif_stop_cmd(host, mrq, mrq->stop); + mmc_request_done(mmc, mrq); +} + +static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sh_mmcif_host *host = mmc_priv(mmc); + struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + + if (ios->power_mode == MMC_POWER_OFF) { + /* clock stop */ + sh_mmcif_clock_control(host, 0); + if (p->down_pwr) + p->down_pwr(host->pd); + return; + } else if (ios->power_mode == MMC_POWER_UP) { + if (p->set_pwr) + p->set_pwr(host->pd, ios->power_mode); + } + + if (ios->clock) + sh_mmcif_clock_control(host, ios->clock); + + host->bus_width = ios->bus_width; +} + +static struct mmc_host_ops sh_mmcif_ops = { + .request = sh_mmcif_request, + .set_ios = sh_mmcif_set_ios, +}; + +static void sh_mmcif_detect(struct mmc_host *mmc) +{ + mmc_detect_change(mmc, 0); +} + +static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) +{ + struct sh_mmcif_host *host = dev_id; + u32 state = 0; + int err = 0; + + state = sh_mmcif_readl(host, MMCIF_CE_INT); + + if (state & INT_RBSYE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~(INT_RBSYE | INT_CRSPE)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); + } else if (state & INT_CRSPE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_CRSPE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); + } else if (state & INT_BUFREN) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFREN); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + } else if (state & INT_BUFWEN) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFWEN); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + } else if (state & INT_CMD12DRE) { + sh_mmcif_writel(host, MMCIF_CE_INT, + ~(INT_CMD12DRE | INT_CMD12RBE | + INT_CMD12CRE | INT_BUFRE)); 
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); + } else if (state & INT_BUFRE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_BUFRE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); + } else if (state & INT_DTRANE) { + sh_mmcif_writel(host, MMCIF_CE_INT, ~INT_DTRANE); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); + } else if (state & INT_CMD12RBE) { + sh_mmcif_writel(host, MMCIF_CE_INT, + ~(INT_CMD12RBE | INT_CMD12CRE)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); + } else if (state & INT_ERR_STS) { + /* err interrupts */ + sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); + err = 1; + } else { + pr_debug("%s: Not support int\n", DRIVER_NAME); + sh_mmcif_writel(host, MMCIF_CE_INT, ~state); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); + err = 1; + } + if (err) { + host->sd_error = 1; + pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state); + } + host->wait_int = 1; + wake_up(&host->intr_wait); + + return IRQ_HANDLED; +} + +static int __devinit sh_mmcif_probe(struct platform_device *pdev) +{ + int ret = 0, irq[2]; + struct mmc_host *mmc; + struct sh_mmcif_host *host = NULL; + struct sh_mmcif_plat_data *pd = NULL; + struct resource *res; + void __iomem *reg; + char clk_name[8]; + + irq[0] = platform_get_irq(pdev, 0); + irq[1] = platform_get_irq(pdev, 1); + if (irq[0] < 0 || irq[1] < 0) { + pr_err(DRIVER_NAME": Get irq error\n"); + return -ENXIO; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "platform_get_resource error.\n"); + return -ENXIO; + } + reg = ioremap(res->start, resource_size(res)); + if (!reg) { + dev_err(&pdev->dev, "ioremap error.\n"); + return -ENOMEM; + } + pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); + if (!pd) { + dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); + ret = -ENXIO; + goto clean_up; + } + mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto clean_up; + } + host = mmc_priv(mmc); + host->mmc = mmc; + host->addr = reg; + host->timeout = 1000; + + snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); + host->hclk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(host->hclk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + ret = PTR_ERR(host->hclk); + goto clean_up1; + } + clk_enable(host->hclk); + host->clk = clk_get_rate(host->hclk); + host->pd = pdev; + + init_waitqueue_head(&host->intr_wait); + + mmc->ops = &sh_mmcif_ops; + mmc->f_max = host->clk; + /* close to 400KHz */ + if (mmc->f_max < 51200000) + mmc->f_min = mmc->f_max / 128; + else if (mmc->f_max < 102400000) + mmc->f_min = mmc->f_max / 256; + else + mmc->f_min = mmc->f_max / 512; + if (pd->ocr) + mmc->ocr_avail = pd->ocr; + mmc->caps = MMC_CAP_MMC_HIGHSPEED; + if (pd->caps) + mmc->caps |= pd->caps; + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_req_size; + + sh_mmcif_sync_reset(host); + platform_set_drvdata(pdev, host); + mmc_add_host(mmc); + + ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); + if (ret) { + pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); + goto clean_up2; + } + ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); + if (ret) { + free_irq(irq[0], host); + pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n"); + goto clean_up2; + } + + sh_mmcif_writel(host, 
MMCIF_CE_INT_MASK, MASK_ALL); + sh_mmcif_detect(host->mmc); + + pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); + pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, + sh_mmcif_readl(host, MMCIF_CE_VERSION) & 0x0000ffff); + return ret; + +clean_up2: + clk_disable(host->hclk); +clean_up1: + mmc_free_host(mmc); +clean_up: + if (reg) + iounmap(reg); + return ret; +} + +static int __devexit sh_mmcif_remove(struct platform_device *pdev) +{ + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + int irq[2]; + + sh_mmcif_writel(host, MMCIF_CE_INT_MASK, MASK_ALL); + + irq[0] = platform_get_irq(pdev, 0); + irq[1] = platform_get_irq(pdev, 1); + + if (host->addr) + iounmap(host->addr); + + platform_set_drvdata(pdev, NULL); + mmc_remove_host(host->mmc); + + free_irq(irq[0], host); + free_irq(irq[1], host); + + clk_disable(host->hclk); + mmc_free_host(host->mmc); + + return 0; +} + +static struct platform_driver sh_mmcif_driver = { + .probe = sh_mmcif_probe, + .remove = sh_mmcif_remove, + .driver = { + .name = DRIVER_NAME, + }, +}; + +static int __init sh_mmcif_init(void) +{ + return platform_driver_register(&sh_mmcif_driver); +} + +static void __exit sh_mmcif_exit(void) +{ + platform_driver_unregister(&sh_mmcif_driver); +} + +module_init(sh_mmcif_init); +module_exit(sh_mmcif_exit); + + +MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS(DRIVER_NAME); +MODULE_AUTHOR("Yusuke Goda "); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 82554ddec6b3..cec99958b652 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -1032,7 +1032,7 @@ static void tifm_sd_remove(struct tifm_dev *sock) static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state) { - return mmc_suspend_host(tifm_get_drvdata(sock), state); + return mmc_suspend_host(tifm_get_drvdata(sock)); } static int tifm_sd_resume(struct tifm_dev *sock) diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 883fcac21004..ee7d0a5a51c4 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -768,7 +768,7 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) struct mmc_host *mmc = platform_get_drvdata(dev); int ret; - ret = mmc_suspend_host(mmc, state); + ret = mmc_suspend_host(mmc); /* Tell MFD core it can disable us now.*/ if (!ret && cell->disable) diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 632858a94376..19f2d72dbca5 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1280,7 +1280,7 @@ static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) via_save_pcictrlreg(host); via_save_sdcreg(host); - ret = mmc_suspend_host(host->mmc, state); + ret = mmc_suspend_host(host->mmc); pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 69efe01eece8..0012f5d13d28 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1819,7 +1819,7 @@ static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) { BUG_ON(host == NULL); - return mmc_suspend_host(host->mmc, state); + return mmc_suspend_host(host->mmc); } static int wbsd_resume(struct wbsd_host *host) diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 72ebb3f06b86..4dfa6b90c21c 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -189,8 +189,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t 
offset, int origin) return new_offset; } -static int vol_cdev_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int vol_cdev_fsync(struct file *file, int datasync) { struct ubi_volume_desc *desc = file->private_data; struct ubi_device *ubi = desc->vol->ubi; diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c index 82eaf65d2d85..ea9b7a098c9b 100644 --- a/drivers/net/3c507.c +++ b/drivers/net/3c507.c @@ -551,8 +551,7 @@ static irqreturn_t el16_interrupt(int irq, void *dev_id) void __iomem *shmem; if (dev == NULL) { - pr_err("%s: net_interrupt(): irq %d for unknown device.\n", - dev->name, irq); + pr_err("net_interrupt(): irq %d for unknown device.\n", irq); return IRQ_NONE; } diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index c911bfb55b19..b9ad799c719f 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -294,7 +294,7 @@ int be_cmd_POST(struct be_adapter *adapter) } else { return 0; } - } while (timeout < 20); + } while (timeout < 40); dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); return -1; @@ -1429,7 +1429,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; - goto err; + goto err_unlock; } req = cmd->va; sge = nonembedded_sgl(wrb); @@ -1457,7 +1457,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, else status = adapter->flash_status; -err: + return status; + +err_unlock: + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1497,7 +1500,7 @@ err: return status; } -extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; @@ -1590,7 +1593,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); - req->hdr.timeout = 4; + req->hdr.timeout = cpu_to_le32(4); req->pattern = cpu_to_le64(pattern); req->src_port = cpu_to_le32(port_num); @@ -1662,7 +1665,7 @@ err: return status; } -extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, +int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index aa065c71ddd8..54b14272f333 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1861,7 +1861,7 @@ static int be_setup(struct be_adapter *adapter) goto if_destroy; } vf++; - } while (vf < num_vfs); + } } else if (!be_physfn(adapter)) { status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 05b751719bd5..2c5227c02fa0 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -63,6 +63,16 @@ config CAN_BFIN To compile this driver as a module, choose M here: the module will be called bfin_can. +config CAN_JANZ_ICAN3 + tristate "Janz VMOD-ICAN3 Intelligent CAN controller" + depends on CAN_DEV && MFD_JANZ_CMODIO + ---help--- + Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which + connects to a MODULbus carrier board. + + This driver can also be built as a module. If so, the module will be + called janz-ican3.ko. 
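Illustrative aside (a sketch, not part of the patch): the "depends on CAN_DEV && MFD_JANZ_CMODIO" line above means the new symbol only becomes selectable once the CAN device layer and the Janz CMODIO carrier driver are already enabled. A .config fragment along these lines would expose it; building everything as "=m" is just one possible choice:

CONFIG_CAN=m
CONFIG_CAN_DEV=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_CAN_JANZ_ICAN3=m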
+ source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/sja1000/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 7a702f28d01c..9047cd066fea 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -15,5 +15,6 @@ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o obj-$(CONFIG_CAN_BFIN) += bfin_can.o +obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c new file mode 100644 index 000000000000..6e533dcc36c0 --- /dev/null +++ b/drivers/net/can/janz-ican3.c @@ -0,0 +1,1830 @@ +/* + * Janz MODULbus VMOD-ICAN3 CAN Interface Driver + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/* the DPM has 64k of memory, organized into 256x 256 byte pages */ +#define DPM_NUM_PAGES 256 +#define DPM_PAGE_SIZE 256 +#define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE) + +/* JANZ ICAN3 "old-style" host interface queue page numbers */ +#define QUEUE_OLD_CONTROL 0 +#define QUEUE_OLD_RB0 1 +#define QUEUE_OLD_RB1 2 +#define QUEUE_OLD_WB0 3 +#define QUEUE_OLD_WB1 4 + +/* Janz ICAN3 "old-style" host interface control registers */ +#define MSYNC_PEER 0x00 /* ICAN only */ +#define MSYNC_LOCL 0x01 /* host only */ +#define TARGET_RUNNING 0x02 + +#define MSYNC_RB0 0x01 +#define MSYNC_RB1 0x02 +#define MSYNC_RBLW 0x04 +#define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1) + +#define MSYNC_WB0 0x10 +#define MSYNC_WB1 0x20 +#define MSYNC_WBLW 0x40 +#define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1) + +/* Janz ICAN3 "new-style" host interface queue page numbers */ +#define QUEUE_TOHOST 5 +#define QUEUE_FROMHOST_MID 6 +#define QUEUE_FROMHOST_HIGH 7 +#define QUEUE_FROMHOST_LOW 8 + +/* The first free page in the DPM is #9 */ +#define DPM_FREE_START 9 + +/* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */ +#define DESC_VALID 0x80 +#define DESC_WRAP 0x40 +#define DESC_INTERRUPT 0x20 +#define DESC_IVALID 0x10 +#define DESC_LEN(len) (len) + +/* Janz ICAN3 Firmware Messages */ +#define MSG_CONNECTI 0x02 +#define MSG_DISCONNECT 0x03 +#define MSG_IDVERS 0x04 +#define MSG_MSGLOST 0x05 +#define MSG_NEWHOSTIF 0x08 +#define MSG_INQUIRY 0x0a +#define MSG_SETAFILMASK 0x10 +#define MSG_INITFDPMQUEUE 0x11 +#define MSG_HWCONF 0x12 +#define MSG_FMSGLOST 0x15 +#define MSG_CEVTIND 0x37 +#define MSG_CBTRREQ 0x41 +#define MSG_COFFREQ 0x42 +#define MSG_CONREQ 0x43 +#define MSG_CCONFREQ 0x47 + +/* + * Janz ICAN3 CAN Inquiry Message Types + * + * NOTE: there appears to be a firmware bug here. You must send + * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED + * NOTE: response. 
The controller never responds to a message with + * NOTE: the INQUIRY_EXTENDED subspec :( + */ +#define INQUIRY_STATUS 0x00 +#define INQUIRY_TERMINATION 0x01 +#define INQUIRY_EXTENDED 0x04 + +/* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */ +#define SETAFILMASK_REJECT 0x00 +#define SETAFILMASK_FASTIF 0x02 + +/* Janz ICAN3 CAN Hardware Configuration Message Types */ +#define HWCONF_TERMINATE_ON 0x01 +#define HWCONF_TERMINATE_OFF 0x00 + +/* Janz ICAN3 CAN Event Indication Message Types */ +#define CEVTIND_EI 0x01 +#define CEVTIND_DOI 0x02 +#define CEVTIND_LOST 0x04 +#define CEVTIND_FULL 0x08 +#define CEVTIND_BEI 0x10 + +#define CEVTIND_CHIP_SJA1000 0x02 + +#define ICAN3_BUSERR_QUOTA_MAX 255 + +/* Janz ICAN3 CAN Frame Conversion */ +#define ICAN3_ECHO 0x10 +#define ICAN3_EFF_RTR 0x40 +#define ICAN3_SFF_RTR 0x10 +#define ICAN3_EFF 0x80 + +#define ICAN3_CAN_TYPE_MASK 0x0f +#define ICAN3_CAN_TYPE_SFF 0x00 +#define ICAN3_CAN_TYPE_EFF 0x01 + +#define ICAN3_CAN_DLC_MASK 0x0f + +/* + * SJA1000 Status and Error Register Definitions + * + * Copied from drivers/net/can/sja1000/sja1000.h + */ + +/* status register content */ +#define SR_BS 0x80 +#define SR_ES 0x40 +#define SR_TS 0x20 +#define SR_RS 0x10 +#define SR_TCS 0x08 +#define SR_TBS 0x04 +#define SR_DOS 0x02 +#define SR_RBS 0x01 + +#define SR_CRIT (SR_BS|SR_ES) + +/* ECC register */ +#define ECC_SEG 0x1F +#define ECC_DIR 0x20 +#define ECC_ERR 6 +#define ECC_BIT 0x00 +#define ECC_FORM 0x40 +#define ECC_STUFF 0x80 +#define ECC_MASK 0xc0 + +/* Number of buffers for use in the "new-style" host interface */ +#define ICAN3_NEW_BUFFERS 16 + +/* Number of buffers for use in the "fast" host interface */ +#define ICAN3_TX_BUFFERS 512 +#define ICAN3_RX_BUFFERS 1024 + +/* SJA1000 Clock Input */ +#define ICAN3_CAN_CLOCK 8000000 + +/* Driver Name */ +#define DRV_NAME "janz-ican3" + +/* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */ +struct ican3_dpm_control { + /* window address register */ + u8 window_address; + u8 unused1; + + /* + * Read access: clear interrupt from microcontroller + * Write access: send interrupt to microcontroller + */ + u8 interrupt; + u8 unused2; + + /* write-only: reset all hardware on the module */ + u8 hwreset; + u8 unused3; + + /* write-only: generate an interrupt to the TPU */ + u8 tpuinterrupt; +}; + +struct ican3_dev { + + /* must be the first member */ + struct can_priv can; + + /* CAN network device */ + struct net_device *ndev; + struct napi_struct napi; + + /* Device for printing */ + struct device *dev; + + /* module number */ + unsigned int num; + + /* base address of registers and IRQ */ + struct janz_cmodio_onboard_regs __iomem *ctrl; + struct ican3_dpm_control __iomem *dpmctrl; + void __iomem *dpm; + int irq; + + /* CAN bus termination status */ + struct completion termination_comp; + bool termination_enabled; + + /* CAN bus error status registers */ + struct completion buserror_comp; + struct can_berr_counter bec; + + /* old and new style host interface */ + unsigned int iftype; + + /* + * Any function which changes the current DPM page must hold this + * lock while it is performing data accesses. This ensures that the + * function will not be preempted and end up reading data from a + * different DPM page than it expects. 
+ */ + spinlock_t lock; + + /* new host interface */ + unsigned int rx_int; + unsigned int rx_num; + unsigned int tx_num; + + /* fast host interface */ + unsigned int fastrx_start; + unsigned int fastrx_int; + unsigned int fastrx_num; + unsigned int fasttx_start; + unsigned int fasttx_num; + + /* first free DPM page */ + unsigned int free_page; +}; + +struct ican3_msg { + u8 control; + u8 spec; + __le16 len; + u8 data[252]; +}; + +struct ican3_new_desc { + u8 control; + u8 pointer; +}; + +struct ican3_fast_desc { + u8 control; + u8 command; + u8 data[14]; +}; + +/* write to the window basic address register */ +static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page) +{ + BUG_ON(page >= DPM_NUM_PAGES); + iowrite8(page, &mod->dpmctrl->window_address); +} + +/* + * ICAN3 "old-style" host interface + */ + +/* + * Recieve a message from the ICAN3 "old-style" firmware interface + * + * LOCKING: must hold mod->lock + * + * returns 0 on success, -ENOMEM when no message exists + */ +static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned int mbox, mbox_page; + u8 locl, peer, xord; + + /* get the MSYNC registers */ + ican3_set_page(mod, QUEUE_OLD_CONTROL); + peer = ioread8(mod->dpm + MSYNC_PEER); + locl = ioread8(mod->dpm + MSYNC_LOCL); + xord = locl ^ peer; + + if ((xord & MSYNC_RB_MASK) == 0x00) { + dev_dbg(mod->dev, "no mbox for reading\n"); + return -ENOMEM; + } + + /* find the first free mbox to read */ + if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK) + mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1; + else + mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1; + + /* copy the message */ + mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1; + ican3_set_page(mod, mbox_page); + memcpy_fromio(msg, mod->dpm, sizeof(*msg)); + + /* + * notify the firmware that the read buffer is available + * for it to fill again + */ + locl ^= mbox; + + ican3_set_page(mod, QUEUE_OLD_CONTROL); + iowrite8(locl, mod->dpm + MSYNC_LOCL); + return 0; +} + +/* + * Send a message through the "old-style" firmware interface + * + * LOCKING: must hold mod->lock + * + * returns 0 on success, -ENOMEM when no free space exists + */ +static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned int mbox, mbox_page; + u8 locl, peer, xord; + + /* get the MSYNC registers */ + ican3_set_page(mod, QUEUE_OLD_CONTROL); + peer = ioread8(mod->dpm + MSYNC_PEER); + locl = ioread8(mod->dpm + MSYNC_LOCL); + xord = locl ^ peer; + + if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) { + dev_err(mod->dev, "no mbox for writing\n"); + return -ENOMEM; + } + + /* calculate a free mbox to use */ + mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0; + + /* copy the message to the DPM */ + mbox_page = (mbox == MSYNC_WB0) ? 
QUEUE_OLD_WB0 : QUEUE_OLD_WB1; + ican3_set_page(mod, mbox_page); + memcpy_toio(mod->dpm, msg, sizeof(*msg)); + + locl ^= mbox; + if (mbox == MSYNC_WB1) + locl |= MSYNC_WBLW; + + ican3_set_page(mod, QUEUE_OLD_CONTROL); + iowrite8(locl, mod->dpm + MSYNC_LOCL); + return 0; +} + +/* + * ICAN3 "new-style" Host Interface Setup + */ + +static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod) +{ + struct ican3_new_desc desc; + unsigned long flags; + void __iomem *dst; + int i; + + spin_lock_irqsave(&mod->lock, flags); + + /* setup the internal datastructures for RX */ + mod->rx_num = 0; + mod->rx_int = 0; + + /* tohost queue descriptors are in page 5 */ + ican3_set_page(mod, QUEUE_TOHOST); + dst = mod->dpm; + + /* initialize the tohost (rx) queue descriptors: pages 9-24 */ + for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { + desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */ + desc.pointer = mod->free_page; + + /* set wrap flag on last buffer */ + if (i == ICAN3_NEW_BUFFERS - 1) + desc.control |= DESC_WRAP; + + memcpy_toio(dst, &desc, sizeof(desc)); + dst += sizeof(desc); + mod->free_page++; + } + + /* fromhost (tx) mid queue descriptors are in page 6 */ + ican3_set_page(mod, QUEUE_FROMHOST_MID); + dst = mod->dpm; + + /* setup the internal datastructures for TX */ + mod->tx_num = 0; + + /* initialize the fromhost mid queue descriptors: pages 25-40 */ + for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { + desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */ + desc.pointer = mod->free_page; + + /* set wrap flag on last buffer */ + if (i == ICAN3_NEW_BUFFERS - 1) + desc.control |= DESC_WRAP; + + memcpy_toio(dst, &desc, sizeof(desc)); + dst += sizeof(desc); + mod->free_page++; + } + + /* fromhost hi queue descriptors are in page 7 */ + ican3_set_page(mod, QUEUE_FROMHOST_HIGH); + dst = mod->dpm; + + /* initialize only a single buffer in the fromhost hi queue (unused) */ + desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ + desc.pointer = mod->free_page; + memcpy_toio(dst, &desc, sizeof(desc)); + mod->free_page++; + + /* fromhost low queue descriptors are in page 8 */ + ican3_set_page(mod, QUEUE_FROMHOST_LOW); + dst = mod->dpm; + + /* initialize only a single buffer in the fromhost low queue (unused) */ + desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ + desc.pointer = mod->free_page; + memcpy_toio(dst, &desc, sizeof(desc)); + mod->free_page++; + + spin_unlock_irqrestore(&mod->lock, flags); +} + +/* + * ICAN3 Fast Host Interface Setup + */ + +static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod) +{ + struct ican3_fast_desc desc; + unsigned long flags; + unsigned int addr; + void __iomem *dst; + int i; + + spin_lock_irqsave(&mod->lock, flags); + + /* save the start recv page */ + mod->fastrx_start = mod->free_page; + mod->fastrx_num = 0; + mod->fastrx_int = 0; + + /* build a single fast tohost queue descriptor */ + memset(&desc, 0, sizeof(desc)); + desc.control = 0x00; + desc.command = 1; + + /* build the tohost queue descriptor ring in memory */ + addr = 0; + for (i = 0; i < ICAN3_RX_BUFFERS; i++) { + + /* set the wrap bit on the last buffer */ + if (i == ICAN3_RX_BUFFERS - 1) + desc.control |= DESC_WRAP; + + /* switch to the correct page */ + ican3_set_page(mod, mod->free_page); + + /* copy the descriptor to the DPM */ + dst = mod->dpm + addr; + memcpy_toio(dst, &desc, sizeof(desc)); + addr += sizeof(desc); + + /* move to the next page if necessary */ + if (addr >= DPM_PAGE_SIZE) { + addr = 0; + mod->free_page++; + } + } + + /* make sure 
we page-align the next queue */ + if (addr != 0) + mod->free_page++; + + /* save the start xmit page */ + mod->fasttx_start = mod->free_page; + mod->fasttx_num = 0; + + /* build a single fast fromhost queue descriptor */ + memset(&desc, 0, sizeof(desc)); + desc.control = DESC_VALID; + desc.command = 1; + + /* build the fromhost queue descriptor ring in memory */ + addr = 0; + for (i = 0; i < ICAN3_TX_BUFFERS; i++) { + + /* set the wrap bit on the last buffer */ + if (i == ICAN3_TX_BUFFERS - 1) + desc.control |= DESC_WRAP; + + /* switch to the correct page */ + ican3_set_page(mod, mod->free_page); + + /* copy the descriptor to the DPM */ + dst = mod->dpm + addr; + memcpy_toio(dst, &desc, sizeof(desc)); + addr += sizeof(desc); + + /* move to the next page if necessary */ + if (addr >= DPM_PAGE_SIZE) { + addr = 0; + mod->free_page++; + } + } + + spin_unlock_irqrestore(&mod->lock, flags); +} + +/* + * ICAN3 "new-style" Host Interface Message Helpers + */ + +/* + * LOCKING: must hold mod->lock + */ +static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct ican3_new_desc desc; + void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc)); + + /* switch to the fromhost mid queue, and read the buffer descriptor */ + ican3_set_page(mod, QUEUE_FROMHOST_MID); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + if (!(desc.control & DESC_VALID)) { + dev_dbg(mod->dev, "%s: no free buffers\n", __func__); + return -ENOMEM; + } + + /* switch to the data page, copy the data */ + ican3_set_page(mod, desc.pointer); + memcpy_toio(mod->dpm, msg, sizeof(*msg)); + + /* switch back to the descriptor, set the valid bit, write it back */ + ican3_set_page(mod, QUEUE_FROMHOST_MID); + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the tx number */ + mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1); + return 0; +} + +/* + * LOCKING: must hold mod->lock + */ +static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct ican3_new_desc desc; + void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc)); + + /* switch to the tohost queue, and read the buffer descriptor */ + ican3_set_page(mod, QUEUE_TOHOST); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + if (!(desc.control & DESC_VALID)) { + dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__); + return -ENOMEM; + } + + /* switch to the data page, copy the data */ + ican3_set_page(mod, desc.pointer); + memcpy_fromio(msg, mod->dpm, sizeof(*msg)); + + /* switch back to the descriptor, toggle the valid bit, write it back */ + ican3_set_page(mod, QUEUE_TOHOST); + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the rx number */ + mod->rx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->rx_num + 1); + return 0; +} + +/* + * Message Send / Recv Helpers + */ + +static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mod->lock, flags); + + if (mod->iftype == 0) + ret = ican3_old_send_msg(mod, msg); + else + ret = ican3_new_send_msg(mod, msg); + + spin_unlock_irqrestore(&mod->lock, flags); + return ret; +} + +static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mod->lock, flags); + + if (mod->iftype == 0) + ret = ican3_old_recv_msg(mod, msg); + else + ret = ican3_new_recv_msg(mod, msg); + + spin_unlock_irqrestore(&mod->lock, flags); + return ret; +} + +/* + * Quick Pre-constructed Messages + */ + +static int __devinit ican3_msg_connect(struct ican3_dev *mod) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CONNECTI; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int __devexit ican3_msg_disconnect(struct ican3_dev *mod) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_DISCONNECT; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int __devinit ican3_msg_newhostif(struct ican3_dev *mod) +{ + struct ican3_msg msg; + int ret; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_NEWHOSTIF; + msg.len = cpu_to_le16(0); + + /* If we're not using the old interface, switching seems bogus */ + WARN_ON(mod->iftype != 0); + + ret = ican3_send_msg(mod, &msg); + if (ret) + return ret; + + /* mark the module as using the new host interface */ + mod->iftype = 1; + return 0; +} + +static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod) +{ + struct ican3_msg msg; + unsigned int addr; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_INITFDPMQUEUE; + msg.len = cpu_to_le16(8); + + /* write the tohost queue start address */ + addr = DPM_PAGE_ADDR(mod->fastrx_start); + msg.data[0] = addr & 0xff; + msg.data[1] = (addr >> 8) & 0xff; + msg.data[2] = (addr >> 16) & 0xff; + msg.data[3] = (addr >> 24) & 0xff; + + /* write the fromhost queue start address */ + addr = DPM_PAGE_ADDR(mod->fasttx_start); + msg.data[4] = addr & 0xff; + msg.data[5] = (addr >> 8) & 0xff; + msg.data[6] = (addr >> 16) & 0xff; + msg.data[7] = (addr >> 24) & 0xff; + + /* If we're not using the new interface yet, we cannot do this */ + WARN_ON(mod->iftype != 1); + + return ican3_send_msg(mod, &msg); +} + +/* + * Setup the CAN filter to either accept or reject all + * messages from the CAN bus. + */ +static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept) +{ + struct ican3_msg msg; + int ret; + + /* Standard Frame Format */ + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_SETAFILMASK; + msg.len = cpu_to_le16(5); + msg.data[0] = 0x00; /* IDLo LSB */ + msg.data[1] = 0x00; /* IDLo MSB */ + msg.data[2] = 0xff; /* IDHi LSB */ + msg.data[3] = 0x07; /* IDHi MSB */ + + /* accept all frames for fast host if, or reject all frames */ + msg.data[4] = accept ? 
SETAFILMASK_FASTIF : SETAFILMASK_REJECT; + + ret = ican3_send_msg(mod, &msg); + if (ret) + return ret; + + /* Extended Frame Format */ + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_SETAFILMASK; + msg.len = cpu_to_le16(13); + msg.data[0] = 0; /* MUX = 0 */ + msg.data[1] = 0x00; /* IDLo LSB */ + msg.data[2] = 0x00; + msg.data[3] = 0x00; + msg.data[4] = 0x20; /* IDLo MSB */ + msg.data[5] = 0xff; /* IDHi LSB */ + msg.data[6] = 0xff; + msg.data[7] = 0xff; + msg.data[8] = 0x3f; /* IDHi MSB */ + + /* accept all frames for fast host if, or reject all frames */ + msg.data[9] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; + + return ican3_send_msg(mod, &msg); +} + +/* + * Bring the CAN bus online or offline + */ +static int ican3_set_bus_state(struct ican3_dev *mod, bool on) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; + msg.len = cpu_to_le16(0); + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_termination(struct ican3_dev *mod, bool on) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_HWCONF; + msg.len = cpu_to_le16(2); + msg.data[0] = 0x00; + msg.data[1] = on ? HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_INQUIRY; + msg.len = cpu_to_le16(2); + msg.data[0] = subspec; + msg.data[1] = 0x00; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) +{ + struct ican3_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CCONFREQ; + msg.len = cpu_to_le16(2); + msg.data[0] = 0x00; + msg.data[1] = quota; + + return ican3_send_msg(mod, &msg); +} + +/* + * ICAN3 to Linux CAN Frame Conversion + */ + +static void ican3_to_can_frame(struct ican3_dev *mod, + struct ican3_fast_desc *desc, + struct can_frame *cf) +{ + if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) { + if (desc->data[1] & ICAN3_SFF_RTR) + cf->can_id |= CAN_RTR_FLAG; + + cf->can_id |= desc->data[0] << 3; + cf->can_id |= (desc->data[1] & 0xe0) >> 5; + cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK; + memcpy(cf->data, &desc->data[2], sizeof(cf->data)); + } else { + cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK; + if (desc->data[0] & ICAN3_EFF_RTR) + cf->can_id |= CAN_RTR_FLAG; + + if (desc->data[0] & ICAN3_EFF) { + cf->can_id |= CAN_EFF_FLAG; + cf->can_id |= desc->data[2] << 21; /* 28-21 */ + cf->can_id |= desc->data[3] << 13; /* 20-13 */ + cf->can_id |= desc->data[4] << 5; /* 12-5 */ + cf->can_id |= (desc->data[5] & 0xf8) >> 3; + } else { + cf->can_id |= desc->data[2] << 3; /* 10-3 */ + cf->can_id |= desc->data[3] >> 5; /* 2-0 */ + } + + memcpy(cf->data, &desc->data[6], sizeof(cf->data)); + } +} + +static void can_frame_to_ican3(struct ican3_dev *mod, + struct can_frame *cf, + struct ican3_fast_desc *desc) +{ + /* clear out any stale data in the descriptor */ + memset(desc->data, 0, sizeof(desc->data)); + + /* we always use the extended format, with the ECHO flag set */ + desc->command = ICAN3_CAN_TYPE_EFF; + desc->data[0] |= cf->can_dlc; + desc->data[1] |= ICAN3_ECHO; + + if (cf->can_id & CAN_RTR_FLAG) + desc->data[0] |= ICAN3_EFF_RTR; + + /* pack the id into the correct places */ + if (cf->can_id & CAN_EFF_FLAG) { + desc->data[0] |= ICAN3_EFF; + desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */ + desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */ + 
desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */ + desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */ + } else { + desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */ + desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */ + } + + /* copy the data bits into the descriptor */ + memcpy(&desc->data[6], cf->data, sizeof(cf->data)); +} + +/* + * Interrupt Handling + */ + +/* + * Handle an ID + Version message response from the firmware. We never generate + * this message in production code, but it is very useful when debugging to be + * able to display this message. + */ +static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) +{ + dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data); +} + +static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct net_device *dev = mod->ndev; + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* + * Report that communication messages with the microcontroller firmware + * are being lost. These are never CAN frames, so we do not generate an + * error frame for userspace + */ + if (msg->spec == MSG_MSGLOST) { + dev_err(mod->dev, "lost %d control messages\n", msg->data[0]); + return; + } + + /* + * Oops, this indicates that we have lost messages in the fast queue, + * which are exclusively CAN messages. Our driver isn't reading CAN + * frames fast enough. + * + * We'll pretend that the SJA1000 told us that it ran out of buffer + * space, because there is not a better message for this. + */ + skb = alloc_can_err_skb(dev, &cf); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_errors++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +/* + * Handle CAN Event Indication Messages from the firmware + * + * The ICAN3 firmware provides the values of some SJA1000 registers when it + * generates this message. 
The code below is largely copied from the + * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary + */ +static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) +{ + struct net_device *dev = mod->ndev; + struct net_device_stats *stats = &dev->stats; + enum can_state state = mod->can.state; + u8 status, isrc, rxerr, txerr; + struct can_frame *cf; + struct sk_buff *skb; + + /* we can only handle the SJA1000 part */ + if (msg->data[1] != CEVTIND_CHIP_SJA1000) { + dev_err(mod->dev, "unable to handle errors on non-SJA1000\n"); + return -ENODEV; + } + + /* check the message length for sanity */ + if (le16_to_cpu(msg->len) < 6) { + dev_err(mod->dev, "error message too short\n"); + return -EINVAL; + } + + skb = alloc_can_err_skb(dev, &cf); + if (skb == NULL) + return -ENOMEM; + + isrc = msg->data[0]; + status = msg->data[3]; + rxerr = msg->data[4]; + txerr = msg->data[5]; + + /* data overrun interrupt */ + if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { + dev_dbg(mod->dev, "data overrun interrupt\n"); + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* error warning + passive interrupt */ + if (isrc == CEVTIND_EI) { + dev_dbg(mod->dev, "error warning + passive interrupt\n"); + if (status & SR_BS) { + state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + can_bus_off(dev); + } else if (status & SR_ES) { + if (rxerr >= 128 || txerr >= 128) + state = CAN_STATE_ERROR_PASSIVE; + else + state = CAN_STATE_ERROR_WARNING; + } else { + state = CAN_STATE_ERROR_ACTIVE; + } + } + + /* bus error interrupt */ + if (isrc == CEVTIND_BEI) { + u8 ecc = msg->data[2]; + + dev_dbg(mod->dev, "bus error interrupt\n"); + mod->can.can_stats.bus_error++; + stats->rx_errors++; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + + switch (ecc & ECC_MASK) { + case ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + cf->data[3] = ecc & ECC_SEG; + break; + } + + if ((ecc & ECC_DIR) == 0) + cf->data[2] |= CAN_ERR_PROT_TX; + + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || + state == CAN_STATE_ERROR_PASSIVE)) { + cf->can_id |= CAN_ERR_CRTL; + if (state == CAN_STATE_ERROR_WARNING) { + mod->can.can_stats.error_warning++; + cf->data[1] = (txerr > rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + } else { + mod->can.can_stats.error_passive++; + cf->data[1] = (txerr > rxerr) ? 
+ CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + + mod->can.state = state; + stats->rx_errors++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + return 0; +} + +static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg) +{ + switch (msg->data[0]) { + case INQUIRY_STATUS: + case INQUIRY_EXTENDED: + mod->bec.rxerr = msg->data[5]; + mod->bec.txerr = msg->data[6]; + complete(&mod->buserror_comp); + break; + case INQUIRY_TERMINATION: + mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON; + complete(&mod->termination_comp); + break; + default: + dev_err(mod->dev, "recieved an unknown inquiry response\n"); + break; + } +} + +static void ican3_handle_unknown_message(struct ican3_dev *mod, + struct ican3_msg *msg) +{ + dev_warn(mod->dev, "recieved unknown message: spec 0x%.2x length %d\n", + msg->spec, le16_to_cpu(msg->len)); +} + +/* + * Handle a control message from the firmware + */ +static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg) +{ + dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__, + mod->num, msg->spec, le16_to_cpu(msg->len)); + + switch (msg->spec) { + case MSG_IDVERS: + ican3_handle_idvers(mod, msg); + break; + case MSG_MSGLOST: + case MSG_FMSGLOST: + ican3_handle_msglost(mod, msg); + break; + case MSG_CEVTIND: + ican3_handle_cevtind(mod, msg); + break; + case MSG_INQUIRY: + ican3_handle_inquiry(mod, msg); + break; + default: + ican3_handle_unknown_message(mod, msg); + break; + } +} + +/* + * Check that there is room in the TX ring to transmit another skb + * + * LOCKING: must hold mod->lock + */ +static bool ican3_txok(struct ican3_dev *mod) +{ + struct ican3_fast_desc __iomem *desc; + u8 control; + + /* copy the control bits of the descriptor */ + ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); + desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc)); + control = ioread8(&desc->control); + + /* if the control bits are not valid, then we have no more space */ + if (!(control & DESC_VALID)) + return false; + + return true; +} + +/* + * Recieve one CAN frame from the hardware + * + * This works like the core of a NAPI function, but is intended to be called + * from workqueue context instead. This driver already needs a workqueue to + * process control messages, so we use the workqueue instead of using NAPI. + * This was done to simplify locking. 
+ * + * CONTEXT: must be called from user context + */ +static int ican3_recv_skb(struct ican3_dev *mod) +{ + struct net_device *ndev = mod->ndev; + struct net_device_stats *stats = &ndev->stats; + struct ican3_fast_desc desc; + void __iomem *desc_addr; + struct can_frame *cf; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&mod->lock, flags); + + /* copy the whole descriptor */ + ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); + desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc)); + memcpy_fromio(&desc, desc_addr, sizeof(desc)); + + spin_unlock_irqrestore(&mod->lock, flags); + + /* check that we actually have a CAN frame */ + if (!(desc.control & DESC_VALID)) + return -ENOBUFS; + + /* allocate an skb */ + skb = alloc_can_skb(ndev, &cf); + if (unlikely(skb == NULL)) { + stats->rx_dropped++; + goto err_noalloc; + } + + /* convert the ICAN3 frame into Linux CAN format */ + ican3_to_can_frame(mod, &desc, cf); + + /* receive the skb, update statistics */ + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + +err_noalloc: + /* toggle the valid bit and return the descriptor to the ring */ + desc.control ^= DESC_VALID; + + spin_lock_irqsave(&mod->lock, flags); + + ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); + memcpy_toio(desc_addr, &desc, 1); + + /* update the next buffer pointer */ + mod->fastrx_num = (desc.control & DESC_WRAP) ? 0 + : (mod->fastrx_num + 1); + + /* there are still more buffers to process */ + spin_unlock_irqrestore(&mod->lock, flags); + return 0; +} + +static int ican3_napi(struct napi_struct *napi, int budget) +{ + struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi); + struct ican3_msg msg; + unsigned long flags; + int received = 0; + int ret; + + /* process all communication messages */ + while (true) { + ret = ican3_recv_msg(mod, &msg); + if (ret) + break; + + ican3_handle_message(mod, &msg); + } + + /* process all CAN frames from the fast interface */ + while (received < budget) { + ret = ican3_recv_skb(mod); + if (ret) + break; + + received++; + } + + /* We have processed all packets that the adapter had, but it + * was less than our budget, stop polling */ + if (received < budget) + napi_complete(napi); + + spin_lock_irqsave(&mod->lock, flags); + + /* Wake up the transmit queue if necessary */ + if (netif_queue_stopped(mod->ndev) && ican3_txok(mod)) + netif_wake_queue(mod->ndev); + + spin_unlock_irqrestore(&mod->lock, flags); + + /* re-enable interrupt generation */ + iowrite8(1 << mod->num, &mod->ctrl->int_enable); + return received; +} + +static irqreturn_t ican3_irq(int irq, void *dev_id) +{ + struct ican3_dev *mod = dev_id; + u8 stat; + + /* + * The interrupt status register on this device reports interrupts + * as zeroes instead of using ones like most other devices + */ + stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num); + if (stat == (1 << mod->num)) + return IRQ_NONE; + + /* clear the MODULbus interrupt from the microcontroller */ + ioread8(&mod->dpmctrl->interrupt); + + /* disable interrupt generation, schedule the NAPI poller */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + napi_schedule(&mod->napi); + return IRQ_HANDLED; +} + +/* + * Firmware reset, startup, and shutdown + */ + +/* + * Reset an ICAN module to its power-on state + * + * CONTEXT: no network device registered + * LOCKING: work function disabled + */ +static int ican3_reset_module(struct ican3_dev *mod) +{ + u8 val = 1 << mod->num; + unsigned long start; + u8 runold, 
runnew; + + /* disable interrupts so no more work is scheduled */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + + /* flush any pending work */ + flush_scheduled_work(); + + /* the first unallocated page in the DPM is #9 */ + mod->free_page = DPM_FREE_START; + + ican3_set_page(mod, QUEUE_OLD_CONTROL); + runold = ioread8(mod->dpm + TARGET_RUNNING); + + /* reset the module */ + iowrite8(val, &mod->ctrl->reset_assert); + iowrite8(val, &mod->ctrl->reset_deassert); + + /* wait until the module has finished resetting and is running */ + start = jiffies; + do { + ican3_set_page(mod, QUEUE_OLD_CONTROL); + runnew = ioread8(mod->dpm + TARGET_RUNNING); + if (runnew == (runold ^ 0xff)) + return 0; + + msleep(10); + } while (time_before(jiffies, start + HZ / 4)); + + dev_err(mod->dev, "failed to reset CAN module\n"); + return -ETIMEDOUT; +} + +static void __devexit ican3_shutdown_module(struct ican3_dev *mod) +{ + ican3_msg_disconnect(mod); + ican3_reset_module(mod); +} + +/* + * Startup an ICAN module, bringing it into fast mode + */ +static int __devinit ican3_startup_module(struct ican3_dev *mod) +{ + int ret; + + ret = ican3_reset_module(mod); + if (ret) { + dev_err(mod->dev, "unable to reset module\n"); + return ret; + } + + /* re-enable interrupts so we can send messages */ + iowrite8(1 << mod->num, &mod->ctrl->int_enable); + + ret = ican3_msg_connect(mod); + if (ret) { + dev_err(mod->dev, "unable to connect to module\n"); + return ret; + } + + ican3_init_new_host_interface(mod); + ret = ican3_msg_newhostif(mod); + if (ret) { + dev_err(mod->dev, "unable to switch to new-style interface\n"); + return ret; + } + + /* default to "termination on" */ + ret = ican3_set_termination(mod, true); + if (ret) { + dev_err(mod->dev, "unable to enable termination\n"); + return ret; + } + + /* default to "bus errors enabled" */ + ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX); + if (ret) { + dev_err(mod->dev, "unable to set bus-error\n"); + return ret; + } + + ican3_init_fast_host_interface(mod); + ret = ican3_msg_fasthostif(mod); + if (ret) { + dev_err(mod->dev, "unable to switch to fast host interface\n"); + return ret; + } + + ret = ican3_set_id_filter(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set acceptance filter\n"); + return ret; + } + + return 0; +} + +/* + * CAN Network Device + */ + +static int ican3_open(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + u8 quota; + int ret; + + /* open the CAN layer */ + ret = open_candev(ndev); + if (ret) { + dev_err(mod->dev, "unable to start CAN layer\n"); + return ret; + } + + /* set the bus error generation state appropriately */ + if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) + quota = ICAN3_BUSERR_QUOTA_MAX; + else + quota = 0; + + ret = ican3_set_buserror(mod, quota); + if (ret) { + dev_err(mod->dev, "unable to set bus-error\n"); + close_candev(ndev); + return ret; + } + + /* bring the bus online */ + ret = ican3_set_bus_state(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set bus-on\n"); + close_candev(ndev); + return ret; + } + + /* start up the network device */ + mod->can.state = CAN_STATE_ERROR_ACTIVE; + netif_start_queue(ndev); + + return 0; +} + +static int ican3_stop(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + /* stop the network device xmit routine */ + netif_stop_queue(ndev); + mod->can.state = CAN_STATE_STOPPED; + + /* bring the bus offline, stop receiving packets */ + ret = ican3_set_bus_state(mod, false); + if (ret) { + 
dev_err(mod->dev, "unable to set bus-off\n"); + return ret; + } + + /* close the CAN layer */ + close_candev(ndev); + return 0; +} + +static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf = (struct can_frame *)skb->data; + struct ican3_fast_desc desc; + void __iomem *desc_addr; + unsigned long flags; + + spin_lock_irqsave(&mod->lock, flags); + + /* check that we can actually transmit */ + if (!ican3_txok(mod)) { + dev_err(mod->dev, "no free descriptors, stopping queue\n"); + netif_stop_queue(ndev); + spin_unlock_irqrestore(&mod->lock, flags); + return NETDEV_TX_BUSY; + } + + /* copy the control bits of the descriptor */ + ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); + desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc)); + memset(&desc, 0, sizeof(desc)); + memcpy_fromio(&desc, desc_addr, 1); + + /* convert the Linux CAN frame into ICAN3 format */ + can_frame_to_ican3(mod, cf, &desc); + + /* + * the programming manual says that you must set the IVALID bit, then + * interrupt, then set the valid bit. Quite weird, but it seems to be + * required for this to work + */ + desc.control |= DESC_IVALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* generate a MODULbus interrupt to the microcontroller */ + iowrite8(0x01, &mod->dpmctrl->interrupt); + + desc.control ^= DESC_VALID; + memcpy_toio(desc_addr, &desc, sizeof(desc)); + + /* update the next buffer pointer */ + mod->fasttx_num = (desc.control & DESC_WRAP) ? 0 + : (mod->fasttx_num + 1); + + /* update statistics */ + stats->tx_packets++; + stats->tx_bytes += cf->can_dlc; + kfree_skb(skb); + + /* + * This hardware doesn't have TX-done notifications, so we'll try and + * emulate it the best we can using ECHO skbs. Get the next TX + * descriptor, and see if we have room to send. If not, stop the queue. + * It will be woken when the ECHO skb for the current packet is recv'd. 
+ */ + + /* copy the control bits of the descriptor */ + if (!ican3_txok(mod)) + netif_stop_queue(ndev); + + spin_unlock_irqrestore(&mod->lock, flags); + return NETDEV_TX_OK; +} + +static const struct net_device_ops ican3_netdev_ops = { + .ndo_open = ican3_open, + .ndo_stop = ican3_stop, + .ndo_start_xmit = ican3_xmit, +}; + +/* + * Low-level CAN Device + */ + +/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */ +static struct can_bittiming_const ican3_bittiming_const = { + .name = DRV_NAME, + .tseg1_min = 1, + .tseg1_max = 16, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 64, + .brp_inc = 1, +}; + +/* + * This routine was stolen from drivers/net/can/sja1000/sja1000.c + * + * The bittiming register command for the ICAN3 just sets the bit timing + * registers on the SJA1000 chip directly + */ +static int ican3_set_bittiming(struct net_device *ndev) +{ + struct ican3_dev *mod = netdev_priv(ndev); + struct can_bittiming *bt = &mod->can.bittiming; + struct ican3_msg msg; + u8 btr0, btr1; + + btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); + btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | + (((bt->phase_seg2 - 1) & 0x7) << 4); + if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + btr1 |= 0x80; + + memset(&msg, 0, sizeof(msg)); + msg.spec = MSG_CBTRREQ; + msg.len = cpu_to_le16(4); + msg.data[0] = 0x00; + msg.data[1] = 0x00; + msg.data[2] = btr0; + msg.data[3] = btr1; + + return ican3_send_msg(mod, &msg); +} + +static int ican3_set_mode(struct net_device *ndev, enum can_mode mode) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + if (mode != CAN_MODE_START) + return -ENOTSUPP; + + /* bring the bus online */ + ret = ican3_set_bus_state(mod, true); + if (ret) { + dev_err(mod->dev, "unable to set bus-on\n"); + return ret; + } + + /* start up the network device */ + mod->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + return 0; +} + +static int ican3_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct ican3_dev *mod = netdev_priv(ndev); + int ret; + + ret = ican3_send_inquiry(mod, INQUIRY_STATUS); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); + if (ret <= 0) { + dev_info(mod->dev, "%s timed out\n", __func__); + return -ETIMEDOUT; + } + + bec->rxerr = mod->bec.rxerr; + bec->txerr = mod->bec.txerr; + return 0; +} + +/* + * Sysfs Attributes + */ + +static ssize_t ican3_sysfs_show_term(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); + int ret; + + ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&mod->termination_comp, HZ); + if (ret <= 0) { + dev_info(mod->dev, "%s timed out\n", __func__); + return -ETIMEDOUT; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled); +} + +static ssize_t ican3_sysfs_set_term(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); + unsigned long enable; + int ret; + + if (strict_strtoul(buf, 0, &enable)) + return -EINVAL; + + ret = ican3_set_termination(mod, enable); + if (ret) + return ret; + + return count; +} + +static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term, + ican3_sysfs_set_term); + +static struct attribute *ican3_sysfs_attrs[] = { + &dev_attr_termination.attr, + NULL, +}; + 
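Illustrative aside (a sketch, not part of the patch): ican3_probe() below hooks ican3_sysfs_attr_group into the net device's sysfs_groups[], so the "termination" attribute defined above ends up in the CAN interface's sysfs directory. A minimal user-space check could look like the following; the interface name "can0" is an assumption and depends on registration order.

#include <stdio.h>

int main(void)
{
	/* hypothetical path; substitute the actual CAN interface name */
	const char *path = "/sys/class/net/can0/termination";
	char buf[8];
	FILE *f;

	/* a read triggers an INQUIRY_TERMINATION message and reports 0 or 1 */
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("termination: %s", buf);
	fclose(f);

	/* writing "1" (or "0") is routed to ican3_set_termination() */
	f = fopen(path, "w");
	if (f) {
		fputs("1", f);
		fclose(f);
	}
	return 0;
}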
+static struct attribute_group ican3_sysfs_attr_group = { + .attrs = ican3_sysfs_attrs, +}; + +/* + * PCI Subsystem + */ + +static int __devinit ican3_probe(struct platform_device *pdev) +{ + struct janz_platform_data *pdata; + struct net_device *ndev; + struct ican3_dev *mod; + struct resource *res; + struct device *dev; + int ret; + + pdata = pdev->dev.platform_data; + if (!pdata) + return -ENXIO; + + dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno); + + /* save the struct device for printing */ + dev = &pdev->dev; + + /* allocate the CAN device and private data */ + ndev = alloc_candev(sizeof(*mod), 0); + if (!ndev) { + dev_err(dev, "unable to allocate CANdev\n"); + ret = -ENOMEM; + goto out_return; + } + + platform_set_drvdata(pdev, ndev); + mod = netdev_priv(ndev); + mod->ndev = ndev; + mod->dev = &pdev->dev; + mod->num = pdata->modno; + netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); + spin_lock_init(&mod->lock); + init_completion(&mod->termination_comp); + init_completion(&mod->buserror_comp); + + /* setup device-specific sysfs attributes */ + ndev->sysfs_groups[0] = &ican3_sysfs_attr_group; + + /* the first unallocated page in the DPM is 9 */ + mod->free_page = DPM_FREE_START; + + ndev->netdev_ops = &ican3_netdev_ops; + ndev->flags |= IFF_ECHO; + SET_NETDEV_DEV(ndev, &pdev->dev); + + mod->can.clock.freq = ICAN3_CAN_CLOCK; + mod->can.bittiming_const = &ican3_bittiming_const; + mod->can.do_set_bittiming = ican3_set_bittiming; + mod->can.do_set_mode = ican3_set_mode; + mod->can.do_get_berr_counter = ican3_get_berr_counter; + mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES + | CAN_CTRLMODE_BERR_REPORTING; + + /* find our IRQ number */ + mod->irq = platform_get_irq(pdev, 0); + if (mod->irq < 0) { + dev_err(dev, "IRQ line not found\n"); + ret = -ENODEV; + goto out_free_ndev; + } + + ndev->irq = mod->irq; + + /* get access to the MODULbus registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "MODULbus registers not found\n"); + ret = -ENODEV; + goto out_free_ndev; + } + + mod->dpm = ioremap(res->start, resource_size(res)); + if (!mod->dpm) { + dev_err(dev, "MODULbus registers not ioremap\n"); + ret = -ENOMEM; + goto out_free_ndev; + } + + mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE; + + /* get access to the control registers for this module */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "CONTROL registers not found\n"); + ret = -ENODEV; + goto out_iounmap_dpm; + } + + mod->ctrl = ioremap(res->start, resource_size(res)); + if (!mod->ctrl) { + dev_err(dev, "CONTROL registers not ioremap\n"); + ret = -ENOMEM; + goto out_iounmap_dpm; + } + + /* disable our IRQ, then hookup the IRQ handler */ + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod); + if (ret) { + dev_err(dev, "unable to request IRQ\n"); + goto out_iounmap_ctrl; + } + + /* reset and initialize the CAN controller into fast mode */ + napi_enable(&mod->napi); + ret = ican3_startup_module(mod); + if (ret) { + dev_err(dev, "%s: unable to start CANdev\n", __func__); + goto out_free_irq; + } + + /* register with the Linux CAN layer */ + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "%s: unable to register CANdev\n", __func__); + goto out_free_irq; + } + + dev_info(dev, "module %d: registered CAN device\n", pdata->modno); + return 0; + +out_free_irq: + napi_disable(&mod->napi); + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + 
free_irq(mod->irq, mod); +out_iounmap_ctrl: + iounmap(mod->ctrl); +out_iounmap_dpm: + iounmap(mod->dpm); +out_free_ndev: + free_candev(ndev); +out_return: + return ret; +} + +static int __devexit ican3_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ican3_dev *mod = netdev_priv(ndev); + + /* unregister the netdevice, stop interrupts */ + unregister_netdev(ndev); + napi_disable(&mod->napi); + iowrite8(1 << mod->num, &mod->ctrl->int_disable); + free_irq(mod->irq, mod); + + /* put the module into reset */ + ican3_shutdown_module(mod); + + /* unmap all registers */ + iounmap(mod->ctrl); + iounmap(mod->dpm); + + free_candev(ndev); + + return 0; +} + +static struct platform_driver ican3_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = ican3_probe, + .remove = __devexit_p(ican3_remove), +}; + +static int __init ican3_init(void) +{ + return platform_driver_register(&ican3_driver); +} + +static void __exit ican3_exit(void) +{ + platform_driver_unregister(&ican3_driver); +} + +MODULE_AUTHOR("Ira W. Snyder "); +MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:janz-ican3"); + +module_init(ican3_init); +module_exit(ican3_exit); diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 8af8442c694a..af753936e835 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -73,7 +73,7 @@ static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev, else *mscan_clksrc = MSCAN_CLKSRC_XTAL; - freq = mpc5xxx_get_bus_frequency(ofdev->node); + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); if (!freq) return 0; @@ -152,7 +152,7 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev, } /* Determine the MSCAN device index from the physical address */ - pval = of_get_property(ofdev->node, "reg", &plen); + pval = of_get_property(ofdev->dev.of_node, "reg", &plen); BUG_ON(!pval || plen < sizeof(*pval)); clockidx = (*pval & 0x80) ? 
1 : 0; if (*pval & 0x2000) @@ -168,11 +168,11 @@ static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev, */ if (clock_name && !strcmp(clock_name, "ip")) { *mscan_clksrc = MSCAN_CLKSRC_IPS; - freq = mpc5xxx_get_bus_frequency(ofdev->node); + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); } else { *mscan_clksrc = MSCAN_CLKSRC_BUS; - pval = of_get_property(ofdev->node, + pval = of_get_property(ofdev->dev.of_node, "fsl,mscan-clock-divider", &plen); if (pval && plen == sizeof(*pval)) clockdiv = *pval; @@ -251,7 +251,7 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev, const struct of_device_id *id) { struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data; - struct device_node *np = ofdev->node; + struct device_node *np = ofdev->dev.of_node; struct net_device *dev; struct mscan_priv *priv; void __iomem *base; diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index be90d3598bca..fe925663d39a 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -3367,13 +3367,9 @@ static int cnic_cm_shutdown(struct cnic_dev *dev) static void cnic_init_context(struct cnic_dev *dev, u32 cid) { - struct cnic_local *cp = dev->cnic_priv; u32 cid_addr; int i; - if (CHIP_NUM(cp) == CHIP_NUM_5709) - return; - cid_addr = GET_CID_ADDR(cid); for (i = 0; i < CTX_SIZE; i += 4) @@ -3530,14 +3526,11 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) sb_id = cp->status_blk_num; tx_cid = 20; - cnic_init_context(dev, tx_cid); - cnic_init_context(dev, tx_cid + 1); cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { struct status_block_msix *sblk = cp->status_blk.bnx2; tx_cid = TX_TSS_CID + sb_id - 1; - cnic_init_context(dev, tx_cid); CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | (TX_TSS_CID << 7)); cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; @@ -3556,6 +3549,9 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; } else { + cnic_init_context(dev, tx_cid); + cnic_init_context(dev, tx_cid + 1); + offset0 = BNX2_L2CTX_TYPE; offset1 = BNX2_L2CTX_CMD_TYPE; offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 110c62072e6f..0c55177db046 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.1.1" -#define CNIC_MODULE_RELDATE "Feb 22, 2010" +#define CNIC_MODULE_VERSION "2.1.2" +#define CNIC_MODULE_RELDATE "May 26, 2010" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 326465ffbb23..ddf7a86cd466 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -681,6 +681,8 @@ static int fec_enet_mii_probe(struct net_device *dev) struct phy_device *phy_dev = NULL; int phy_addr; + fep->phy_dev = NULL; + /* find the first phy */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (fep->mii_bus->phy_map[phy_addr]) { @@ -711,6 +713,11 @@ static int fec_enet_mii_probe(struct net_device *dev) fep->link = 0; fep->full_duplex = 0; + printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), + fep->phy_dev->irq); + return 0; } @@ -756,13 +763,8 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; - if (fec_enet_mii_probe(dev) != 0) - goto err_out_unregister_bus; 
- return 0; -err_out_unregister_bus: - mdiobus_unregister(fep->mii_bus); err_out_free_mdio_irq: kfree(fep->mii_bus->irq); err_out_free_mdiobus: @@ -915,7 +917,12 @@ fec_enet_open(struct net_device *dev) if (ret) return ret; - /* schedule a link state check */ + /* Probe and connect to PHY when open the interface */ + ret = fec_enet_mii_probe(dev); + if (ret) { + fec_enet_free_buffers(dev); + return ret; + } phy_start(fep->phy_dev); netif_start_queue(dev); fep->opened = 1; @@ -929,10 +936,12 @@ fec_enet_close(struct net_device *dev) /* Don't know what to do yet. */ fep->opened = 0; - phy_stop(fep->phy_dev); netif_stop_queue(dev); fec_stop(dev); + if (fep->phy_dev) + phy_disconnect(fep->phy_dev); + fec_enet_free_buffers(dev); return 0; @@ -1316,11 +1325,6 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_register; - printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, - fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), - fep->phy_dev->irq); - return 0; failed_register: diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 5d45084b287d..48e91b6242ce 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -504,17 +504,54 @@ static int get_regs_len(struct net_device *dev) } /* Some transmit errors cause the transmitter to shut - * down. We now issue a restart transmit. Since the - * errors close the BD and update the pointers, the restart - * _should_ pick up without having to reset any of our - * pointers either. Also, To workaround 8260 device erratum - * CPM37, we must disable and then re-enable the transmitter - * following a Late Collision, Underrun, or Retry Limit error. + * down. We now issue a restart transmit. + * Also, to workaround 8260 device erratum CPM37, we must + * disable and then re-enable the transmitterfollowing a + * Late Collision, Underrun, or Retry Limit error. + * In addition, tbptr may point beyond BDs beyond still marked + * as ready due to internal pipelining, so we need to look back + * through the BDs and adjust tbptr to point to the last BD + * marked as ready. This may result in some buffers being + * retransmitted. */ static void tx_restart(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); fcc_t __iomem *fccp = fep->fcc.fccp; + const struct fs_platform_info *fpi = fep->fpi; + fcc_enet_t __iomem *ep = fep->fcc.ep; + cbd_t __iomem *curr_tbptr; + cbd_t __iomem *recheck_bd; + cbd_t __iomem *prev_bd; + cbd_t __iomem *last_tx_bd; + + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); + + /* get the current bd held in TBPTR and scan back from this point */ + recheck_bd = curr_tbptr = (cbd_t __iomem *) + ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + + fep->ring_base); + + prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; + + /* Move through the bds in reverse, look for the earliest buffer + * that is not ready. Adjust TBPTR to the following buffer */ + while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) { + /* Go back one buffer */ + recheck_bd = prev_bd; + + /* update the previous buffer */ + prev_bd = (prev_bd == fep->tx_bd_base) ? 
last_tx_bd : prev_bd - 1; + + /* We should never see all bds marked as ready, check anyway */ + if (recheck_bd == curr_tbptr) + break; + } + /* Now update the TBPTR and dirty flag to the current buffer */ + W32(ep, fen_genfcc.fcc_tbptr, + (uint) (((void *)recheck_bd - fep->ring_base) + + fep->ring_mem_addr)); + fep->dirty_tx = recheck_bd; C32(fccp, fcc_gfmr, FCC_GFMR_ENT); udelay(10); diff --git a/drivers/net/greth.c b/drivers/net/greth.c index f37a4c143ddd..3a029d02c2b4 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -1607,14 +1607,13 @@ static struct of_device_id greth_of_match[] = { MODULE_DEVICE_TABLE(of, greth_of_match); static struct of_platform_driver greth_of_driver = { - .name = "grlib-greth", - .match_table = greth_of_match, + .driver = { + .name = "grlib-greth", + .owner = THIS_MODULE, + .of_match_table = greth_of_match, + }, .probe = greth_of_probe, .remove = __devexit_p(greth_of_remove), - .driver = { - .owner = THIS_MODULE, - .name = "grlib-greth", - }, }; static int __init greth_init(void) diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 694132e04af6..4e7d1d0a2340 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -1151,8 +1151,7 @@ static int __init yam_init_driver(void) dev = alloc_netdev(sizeof(struct yam_port), name, yam_setup); if (!dev) { - printk(KERN_ERR "yam: cannot allocate net device %s\n", - dev->name); + pr_err("yam: cannot allocate net device\n"); err = -ENOMEM; goto error; } diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index c80ca64277b2..7805bbf1d53a 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -4854,7 +4854,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb) * * Return 0 if successful; otherwise an error code indicating failure. 
*/ -static int netdev_tx(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) { struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; @@ -6863,6 +6863,7 @@ static const struct net_device_ops netdev_ops = { .ndo_tx_timeout = netdev_tx_timeout, .ndo_change_mtu = netdev_change_mtu, .ndo_set_mac_address = netdev_set_mac_address, + .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = netdev_ioctl, .ndo_set_rx_mode = netdev_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h index c03358434acb..522abe2ff25a 100644 --- a/drivers/net/ll_temac.h +++ b/drivers/net/ll_temac.h @@ -295,6 +295,10 @@ This option defaults to enabled (set) */ #define MULTICAST_CAM_TABLE_NUM 4 +/* TEMAC Synthesis features */ +#define TEMAC_FEATURE_RX_CSUM (1 << 0) +#define TEMAC_FEATURE_TX_CSUM (1 << 1) + /* TX/RX CURDESC_PTR points to first descriptor */ /* TX/RX TAILDESC_PTR points to last descriptor in linked list */ @@ -353,6 +357,7 @@ struct temac_local { struct mutex indirect_mutex; u32 options; /* Current options word */ int last_link; + unsigned int temac_features; /* Buffer descriptors */ struct cdmac_bd *tx_bd_v; diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index fa7620e28404..52dcc8495647 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -245,7 +245,7 @@ static int temac_dma_bd_init(struct net_device *ndev) CHNL_CTRL_IRQ_COAL_EN); /* 0x10220483 */ /* 0x00100483 */ - lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 | + lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 | CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN | @@ -574,6 +574,10 @@ static void temac_start_xmit_done(struct net_device *ndev) if (cur_p->app4) dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); cur_p->app0 = 0; + cur_p->app1 = 0; + cur_p->app2 = 0; + cur_p->app3 = 0; + cur_p->app4 = 0; ndev->stats.tx_packets++; ndev->stats.tx_bytes += cur_p->len; @@ -589,6 +593,29 @@ static void temac_start_xmit_done(struct net_device *ndev) netif_wake_queue(ndev); } +static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag) +{ + struct cdmac_bd *cur_p; + int tail; + + tail = lp->tx_bd_tail; + cur_p = &lp->tx_bd_v[tail]; + + do { + if (cur_p->app0) + return NETDEV_TX_BUSY; + + tail++; + if (tail >= TX_BD_NUM) + tail = 0; + + cur_p = &lp->tx_bd_v[tail]; + num_frag--; + } while (num_frag >= 0); + + return 0; +} + static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); @@ -603,7 +630,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (cur_p->app0 & STS_CTRL_APP0_CMPLT) { + if (temac_check_tx_bd_space(lp, num_frag)) { if (!netif_queue_stopped(ndev)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; @@ -613,29 +640,14 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev) cur_p->app0 = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { - const struct iphdr *ip = ip_hdr(skb); - int length = 0, start = 0, insert = 0; - - switch (ip->protocol) { - case IPPROTO_TCP: - start = sizeof(struct iphdr) + ETH_HLEN; - insert = sizeof(struct iphdr) + ETH_HLEN + 16; - length = ip->tot_len - sizeof(struct iphdr); - break; - case IPPROTO_UDP: - start = sizeof(struct iphdr) + ETH_HLEN; - insert = sizeof(struct iphdr) + ETH_HLEN + 6; - length = 
ip->tot_len - sizeof(struct iphdr); - break; - default: - break; - } - cur_p->app1 = ((start << 16) | insert); - cur_p->app2 = csum_tcpudp_magic(ip->saddr, ip->daddr, - length, ip->protocol, 0); - skb->data[insert] = 0; - skb->data[insert + 1] = 0; + unsigned int csum_start_off = skb_transport_offset(skb); + unsigned int csum_index_off = csum_start_off + skb->csum_offset; + + cur_p->app0 |= 1; /* TX Checksum Enabled */ + cur_p->app1 = (csum_start_off << 16) | csum_index_off; + cur_p->app2 = 0; /* initial checksum seed */ } + cur_p->app0 |= STS_CTRL_APP0_SOP; cur_p->len = skb_headlen(skb); cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len, @@ -699,6 +711,15 @@ static void ll_temac_recv(struct net_device *ndev) skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; + /* if we're doing rx csum offload, set it up */ + if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && + (skb->protocol == __constant_htons(ETH_P_IP)) && + (skb->len > 64)) { + + skb->csum = cur_p->app3 & 0xFFFF; + skb->ip_summed = CHECKSUM_COMPLETE; + } + netif_rx(skb); ndev->stats.rx_packets++; @@ -883,6 +904,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) struct temac_local *lp; struct net_device *ndev; const void *addr; + __be32 *p; int size, rc = 0; /* Init network device structure */ @@ -926,6 +948,18 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) goto nodev; } + /* Setup checksum offload, but default to off if not specified */ + lp->temac_features = 0; + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); + if (p && be32_to_cpu(*p)) { + lp->temac_features |= TEMAC_FEATURE_TX_CSUM; + /* Can checksum TCP/UDP over IPv4. */ + ndev->features |= NETIF_F_IP_CSUM; + } + p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); + if (p && be32_to_cpu(*p)) + lp->temac_features |= TEMAC_FEATURE_RX_CSUM; + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); if (!np) { @@ -950,7 +984,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match) lp->rx_irq = irq_of_parse_and_map(np, 0); lp->tx_irq = irq_of_parse_and_map(np, 1); - if (!lp->rx_irq || !lp->tx_irq) { + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { dev_err(&op->dev, "could not determine irqs\n"); rc = -ENOMEM; goto nodev; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 78eb3190b9b1..1edb7a61983c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp); if (err < 0) dev_kfree_skb(skb); @@ -385,8 +385,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) /* chain first in list head */ first->private = (unsigned long)list; - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, - first); + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, + first, gfp); if (err < 0) give_pages(vi, first); @@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page); + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp); if (err < 0) give_pages(vi, page); diff --git 
a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 82ab532a4923..a93dc18a45c3 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -739,17 +739,27 @@ err_out: static void ar9170_usb_firmware_failed(struct ar9170_usb *aru) { struct device *parent = aru->udev->dev.parent; + struct usb_device *udev; + + /* + * Store a copy of the usb_device pointer locally. + * This is because device_release_driver initiates + * ar9170_usb_disconnect, which in turn frees our + * driver context (aru). + */ + udev = aru->udev; complete(&aru->firmware_loading_complete); /* unbind anything failed */ if (parent) device_lock(parent); - device_release_driver(&aru->udev->dev); + + device_release_driver(&udev->dev); if (parent) device_unlock(parent); - usb_put_dev(aru->udev); + usb_put_dev(udev); } static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 3db19172b43b..859aa4ab0769 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1198,7 +1198,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) int r; ath_print(common, ATH_DBG_FATAL, - "Unable to stop TxDMA. Reset HAL!\n"); + "Failed to stop TX DMA. Resetting hardware!\n"); spin_lock_bh(&sc->sc_resetlock); r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); @@ -1728,6 +1728,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, } else bf->bf_isnullfunc = false; + bf->bf_tx_aborted = false; + return 0; } @@ -1989,7 +1991,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, int nbad = 0; int isaggr = 0; - if (bf->bf_tx_aborted) + if (bf->bf_lastbf->bf_tx_aborted) return 0; isaggr = bf_isaggr(bf); diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c index a115bfa9513a..7a377f5b7662 100644 --- a/drivers/net/wireless/libertas/rx.c +++ b/drivers/net/wireless/libertas/rx.c @@ -329,9 +329,8 @@ static int process_rxed_802_11_packet(struct lbs_private *priv, /* create the exported radio header */ /* radiotap header */ - radiotap_hdr.hdr.it_version = 0; - /* XXX must check this value for pad */ - radiotap_hdr.hdr.it_pad = 0; + memset(&radiotap_hdr, 0, sizeof(radiotap_hdr)); + /* XXX must check radiotap_hdr.hdr.it_pad for pad */ radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 699161327d65..0f8b84b7224c 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -413,7 +413,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, */ rt2x00_desc_read(txi, 0, &word); rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, - skb->len - TXINFO_DESC_SIZE); + skb->len + TXWI_DESC_SIZE); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index 1586e1caa2f5..8bef6d60f88b 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -18,6 +18,8 @@ #include #include #include +#include + #include #include #include @@ -31,7 +33,6 @@ #define DPRINTK(x...) 
do { } while (0) #endif -static struct parport *this_port = NULL; static void amiga_write_data(struct parport *p, unsigned char data) { @@ -227,18 +228,11 @@ static struct parport_operations pp_amiga_ops = { /* ----------- Initialisation code --------------------------------- */ -static int __init parport_amiga_init(void) +static int __init amiga_parallel_probe(struct platform_device *pdev) { struct parport *p; int err; - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(AMI_PARALLEL)) - return -ENODEV; - - err = -EBUSY; - if (!request_mem_region(CIAA_PHYSADDR-1+0x100, 0x100, "parallel")) - goto out_mem; - ciaa.ddrb = 0xff; ciab.ddra &= 0xf8; mb(); @@ -246,41 +240,63 @@ static int __init parport_amiga_init(void) p = parport_register_port((unsigned long)&ciaa.prb, IRQ_AMIGA_CIAA_FLG, PARPORT_DMA_NONE, &pp_amiga_ops); if (!p) - goto out_port; + return -EBUSY; - err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, p); + err = request_irq(IRQ_AMIGA_CIAA_FLG, parport_irq_handler, 0, p->name, + p); if (err) goto out_irq; - this_port = p; printk(KERN_INFO "%s: Amiga built-in port using irq\n", p->name); /* XXX: set operating mode */ parport_announce_port(p); + platform_set_drvdata(pdev, p); + return 0; out_irq: parport_put_port(p); -out_port: - release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100); -out_mem: return err; } -static void __exit parport_amiga_exit(void) +static int __exit amiga_parallel_remove(struct platform_device *pdev) +{ + struct parport *port = platform_get_drvdata(pdev); + + parport_remove_port(port); + if (port->irq != PARPORT_IRQ_NONE) + free_irq(IRQ_AMIGA_CIAA_FLG, port); + parport_put_port(port); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static struct platform_driver amiga_parallel_driver = { + .remove = __exit_p(amiga_parallel_remove), + .driver = { + .name = "amiga-parallel", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_parallel_init(void) +{ + return platform_driver_probe(&amiga_parallel_driver, + amiga_parallel_probe); +} + +module_init(amiga_parallel_init); + +static void __exit amiga_parallel_exit(void) { - parport_remove_port(this_port); - if (this_port->irq != PARPORT_IRQ_NONE) - free_irq(IRQ_AMIGA_CIAA_FLG, this_port); - parport_put_port(this_port); - release_mem_region(CIAA_PHYSADDR-1+0x100, 0x100); + platform_driver_unregister(&amiga_parallel_driver); } +module_exit(amiga_parallel_exit); MODULE_AUTHOR("Joerg Dorchain "); MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port"); MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port"); MODULE_LICENSE("GPL"); - -module_init(parport_amiga_init) -module_exit(parport_amiga_exit) +MODULE_ALIAS("platform:amiga-parallel"); diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 7aaae2d2bd67..80c11d131499 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev) } #endif +#ifdef CONFIG_ACPI_APEI +extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev); +#else +static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev) +{ + if (pci_dev->__aer_firmware_first_valid) + return pci_dev->__aer_firmware_first; + return 0; +} +#endif + +static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev, + int enable) +{ + pci_dev->__aer_firmware_first = !!enable; + pci_dev->__aer_firmware_first_valid = 1; +} #endif /* _AERDRV_H_ */ diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 04814087658d..f278d7b0d95d 
100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "aerdrv.h" /** @@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev) return 0; } + +#ifdef CONFIG_ACPI_APEI +static inline int hest_match_pci(struct acpi_hest_aer_common *p, + struct pci_dev *pci) +{ + return (0 == pci_domain_nr(pci->bus) && + p->bus == pci->bus->number && + p->device == PCI_SLOT(pci->devfn) && + p->function == PCI_FUNC(pci->devfn)); +} + +struct aer_hest_parse_info { + struct pci_dev *pci_dev; + int firmware_first; +}; + +static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data) +{ + struct aer_hest_parse_info *info = data; + struct acpi_hest_aer_common *p; + u8 pcie_type = 0; + u8 bridge = 0; + int ff = 0; + + switch (hest_hdr->type) { + case ACPI_HEST_TYPE_AER_ROOT_PORT: + pcie_type = PCI_EXP_TYPE_ROOT_PORT; + break; + case ACPI_HEST_TYPE_AER_ENDPOINT: + pcie_type = PCI_EXP_TYPE_ENDPOINT; + break; + case ACPI_HEST_TYPE_AER_BRIDGE: + if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) + bridge = 1; + break; + default: + return 0; + } + + p = (struct acpi_hest_aer_common *)(hest_hdr + 1); + if (p->flags & ACPI_HEST_GLOBAL) { + if ((info->pci_dev->is_pcie && + info->pci_dev->pcie_type == pcie_type) || bridge) + ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + } else + if (hest_match_pci(p, info->pci_dev)) + ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + info->firmware_first = ff; + + return 0; +} + +static void aer_set_firmware_first(struct pci_dev *pci_dev) +{ + int rc; + struct aer_hest_parse_info info = { + .pci_dev = pci_dev, + .firmware_first = 0, + }; + + rc = apei_hest_parse(aer_hest_parse, &info); + + if (rc) + pci_dev->__aer_firmware_first = 0; + else + pci_dev->__aer_firmware_first = info.firmware_first; + pci_dev->__aer_firmware_first_valid = 1; +} + +int pcie_aer_get_firmware_first(struct pci_dev *dev) +{ + if (!dev->__aer_firmware_first_valid) + aer_set_firmware_first(dev); + return dev->__aer_firmware_first; +} +#endif diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index df2d686fe3dd..8af4f619bba2 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; - if (dev->aer_firmware_first) + if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); @@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev) u16 reg16 = 0; int pos; - if (dev->aer_firmware_first) + if (pcie_aer_get_firmware_first(dev)) return -EIO; pos = pci_pcie_cap(dev); @@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work) */ int aer_init(struct pcie_device *dev) { - if (dev->port->aer_firmware_first) { + if (pcie_aer_get_firmware_first(dev->port)) { dev_printk(KERN_DEBUG, &dev->device, "PCIe errors handled by platform firmware.\n"); goto out; @@ -785,7 +785,7 @@ out: if (forceload) { dev_printk(KERN_DEBUG, &dev->device, "aerdrv forceload requested.\n"); - dev->port->aer_firmware_first = 0; + pcie_aer_force_firmware_first(dev->port, 0); return 0; } return -ENXIO; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c82548afcd5c..f4adba2d1dd3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -10,7 +10,6 @@ #include #include #include -#include #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ @@ -904,12 +903,6 @@ void 
set_pcie_hotplug_bridge(struct pci_dev *pdev) pdev->is_hotplug_bridge = 1; } -static void set_pci_aer_firmware_first(struct pci_dev *pdev) -{ - if (acpi_hest_firmware_first_pci(pdev)) - pdev->aer_firmware_first = 1; -} - #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) /** @@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev) dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); - set_pci_aer_firmware_first(dev); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 576c3ed92435..40658e3385b4 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -524,7 +524,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, for (i = 0; i < inlen; i++) ipc_data_writel(*in++, 4 * i); - ipc_command(cmd << 12 | sub); + ipc_command((cmd << 12) | sub | (inlen << 18)); err = busy_loop(); for (i = 0; i < outlen; i++) diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index c32822ad84a4..070211a5955c 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -8,3 +8,27 @@ config RAPIDIO_DISC_TIMEOUT ---help--- Amount of time a discovery node waits for a host to complete enumeration before giving up. + +config RAPIDIO_ENABLE_RX_TX_PORTS + bool "Enable RapidIO Input/Output Ports" + depends on RAPIDIO + ---help--- + The RapidIO specification describes an Output port transmit + enable and an Input port receive enable. The recommended state + for Input ports and Output ports is disabled. When + this switch is set, the RapidIO subsystem will enable all + ports for Input/Output direction to allow traffic other + than Maintenance transfers. + +source "drivers/rapidio/switches/Kconfig" + +config RAPIDIO_DEBUG + bool "RapidIO subsystem debug messages" + depends on RAPIDIO + help + Say Y here if you want the RapidIO subsystem to produce a bunch of + debug messages to the system log. Select this if you are having a + problem with the RapidIO subsystem and want to see more of what is + going on. + + If you are unsure about this, say N here. diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index 7c0e1818de51..b6139fe187bf 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile @@ -4,3 +4,7 @@ obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o obj-$(CONFIG_RAPIDIO) += switches/ + +ifeq ($(CONFIG_RAPIDIO_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index 45415096c294..8070e074c739 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -4,6 +4,14 @@ * Copyright 2005 MontaVista Software, Inc. * Matt Porter * + * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine + * - Added Port-Write/Error Management initialization and handling + * + * Copyright 2009 Sysgo AG + * Thomas Moll + * - Added Input- Output- enable functionality, to allow full communication + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your @@ -31,15 +39,16 @@ LIST_HEAD(rio_devices); static LIST_HEAD(rio_switches); -#define RIO_ENUM_CMPL_MAGIC 0xdeadbeef - static void rio_enum_timeout(unsigned long); +static void rio_init_em(struct rio_dev *rdev); + DEFINE_SPINLOCK(rio_global_list_lock); static int next_destid = 0; static int next_switchid = 0; static int next_net = 0; +static int next_comptag; static struct timer_list rio_enum_timer = TIMER_INITIALIZER(rio_enum_timeout, 0, 0); @@ -52,12 +61,6 @@ static int rio_mport_phys_table[] = { -1, }; -static int rio_sport_phys_table[] = { - RIO_EFB_PAR_EP_FREE_ID, - RIO_EFB_SER_EP_FREE_ID, - -1, -}; - /** * rio_get_device_id - Get the base/extended device id for a device * @port: RIO master port @@ -118,12 +121,26 @@ static int rio_clear_locks(struct rio_mport *port) u32 result; int ret = 0; - /* Write component tag CSR magic complete value */ - rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, - RIO_ENUM_CMPL_MAGIC); - list_for_each_entry(rdev, &rio_devices, global_list) - rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, - RIO_ENUM_CMPL_MAGIC); + /* Assign component tag to all devices */ + next_comptag = 1; + rio_local_write_config_32(port, RIO_COMPONENT_TAG_CSR, next_comptag++); + + list_for_each_entry(rdev, &rio_devices, global_list) { + /* Mark device as discovered */ + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + &result); + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR, + result | RIO_PORT_GEN_DISCOVERED); + + rio_write_config_32(rdev, RIO_COMPONENT_TAG_CSR, next_comptag); + rdev->comp_tag = next_comptag++; + if (next_comptag >= 0x10000) { + pr_err("RIO: Component Tag Counter Overflow\n"); + break; + } + } /* Release host device id locks */ rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR, @@ -229,27 +246,37 @@ static int rio_is_switch(struct rio_dev *rdev) } /** - * rio_route_set_ops- Sets routing operations for a particular vendor switch + * rio_switch_init - Sets switch operations for a particular vendor switch * @rdev: RIO device + * @do_enum: Enumeration/Discovery mode flag * - * Searches the RIO route ops table for known switch types. If the vid - * and did match a switch table entry, then set the add_entry() and - * get_entry() ops to the table entry values. + * Searches the RIO switch ops table for known switch types. If the vid + * and did match a switch table entry, then call switch initialization + * routine to setup switch-specific routines. 
*/ -static void rio_route_set_ops(struct rio_dev *rdev) +static void rio_switch_init(struct rio_dev *rdev, int do_enum) { - struct rio_route_ops *cur = __start_rio_route_ops; - struct rio_route_ops *end = __end_rio_route_ops; + struct rio_switch_ops *cur = __start_rio_switch_ops; + struct rio_switch_ops *end = __end_rio_switch_ops; while (cur < end) { if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) { - pr_debug("RIO: adding routing ops for %s\n", rio_name(rdev)); - rdev->rswitch->add_entry = cur->add_hook; - rdev->rswitch->get_entry = cur->get_hook; + pr_debug("RIO: calling init routine for %s\n", + rio_name(rdev)); + cur->init_hook(rdev, do_enum); + break; } cur++; } + if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) { + pr_debug("RIO: adding STD routing ops for %s\n", + rio_name(rdev)); + rdev->rswitch->add_entry = rio_std_route_add_entry; + rdev->rswitch->get_entry = rio_std_route_get_entry; + rdev->rswitch->clr_table = rio_std_route_clr_table; + } + if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry) printk(KERN_ERR "RIO: missing routing ops for %s\n", rio_name(rdev)); @@ -280,6 +307,65 @@ static int __devinit rio_add_device(struct rio_dev *rdev) return 0; } +/** + * rio_enable_rx_tx_port - enable input reciever and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 select local port otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port (-number on switch) to enable on a far end device + * + * Returns 0 or 1 from on General Control Command and Status Register + * (EXT_PTR+0x3C) + */ +inline int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) { +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + + if (local) { + rio_local_read_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), + ®val); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) + return -EIO; + } + + if (regval & RIO_PORT_N_CTL_P_TYP_SER) { + /* serial */ + regval = regval | RIO_PORT_N_CTL_EN_RX_SER + | RIO_PORT_N_CTL_EN_TX_SER; + } else { + /* parallel */ + regval = regval | RIO_PORT_N_CTL_EN_RX_PAR + | RIO_PORT_N_CTL_EN_TX_PAR; + } + + if (local) { + rio_local_write_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + return -EIO; + } +#endif + return 0; +} + /** * rio_setup_device- Allocates and sets up a RIO device * @net: RIO network @@ -325,8 +411,14 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rdev->asm_rev = result >> 16; rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, &rdev->pef); - if (rdev->pef & RIO_PEF_EXT_FEATURES) + if (rdev->pef & RIO_PEF_EXT_FEATURES) { rdev->efptr = result & 0xffff; + rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid, + hopcount); + + rdev->em_efptr = rio_mport_get_feature(port, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT); + } rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR, &rdev->src_ops); @@ -349,12 +441,13 @@ 
static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, if (rio_is_switch(rdev)) { rio_mport_read_config_32(port, destid, hopcount, RIO_SWP_INFO_CAR, &rdev->swpinfo); - rswitch = kmalloc(sizeof(struct rio_switch), GFP_KERNEL); + rswitch = kzalloc(sizeof(struct rio_switch), GFP_KERNEL); if (!rswitch) goto cleanup; rswitch->switchid = next_switchid; rswitch->hopcount = hopcount; rswitch->destid = destid; + rswitch->port_ok = 0; rswitch->route_table = kzalloc(sizeof(u8)* RIO_MAX_ROUTE_ENTRIES(port->sys_size), GFP_KERNEL); @@ -367,13 +460,22 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rdev->rswitch = rswitch; dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id, rdev->rswitch->switchid); - rio_route_set_ops(rdev); + rio_switch_init(rdev, do_enum); + + if (do_enum && rdev->rswitch->clr_table) + rdev->rswitch->clr_table(port, destid, hopcount, + RIO_GLOBAL_TABLE); list_add_tail(&rswitch->node, &rio_switches); - } else + } else { + if (do_enum) + /* Enable Input Output Port (transmitter receiver) */ + rio_enable_rx_tx_port(port, 0, destid, hopcount, 0); + dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id, rdev->destid); + } rdev->dev.bus = &rio_bus_type; @@ -414,23 +516,29 @@ cleanup: * * Reads the port error status CSR for a particular switch port to * determine if the port has an active link. Returns - * %PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is + * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is * inactive. */ static int rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) { - u32 result; + u32 result = 0; u32 ext_ftr_ptr; - int *entry = rio_sport_phys_table; - - do { - if ((ext_ftr_ptr = - rio_mport_get_feature(port, 0, destid, hopcount, *entry))) + ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0); + while (ext_ftr_ptr) { + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &result); + result = RIO_GET_BLOCK_ID(result); + if ((result == RIO_EFB_SER_EP_FREE_ID) || + (result == RIO_EFB_SER_EP_FREE_ID_V13P) || + (result == RIO_EFB_SER_EP_FREC_ID)) break; - } while (*++entry >= 0); + + ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, + ext_ftr_ptr); + } if (ext_ftr_ptr) rio_mport_read_config_32(port, destid, hopcount, @@ -438,7 +546,81 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) RIO_PORT_N_ERR_STS_CSR(sport), &result); - return (result & PORT_N_ERR_STS_PORT_OK); + return result & RIO_PORT_N_ERR_STS_PORT_OK; +} + +/** + * rio_lock_device - Acquires host device lock for specified device + * @port: Master port to send transaction + * @destid: Destination ID for device/switch + * @hopcount: Hopcount to reach switch + * @wait_ms: Max wait time in msec (0 = no timeout) + * + * Attempts to acquire host device lock for specified device. + * Returns 0 if device lock acquired or EINVAL if timeout expires.
+ */ +static int +rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms) +{ + u32 result; + int tcnt = 0; + + /* Attempt to acquire device lock */ + rio_mport_write_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + + while (result != port->host_deviceid) { + if (wait_ms != 0 && tcnt == wait_ms) { + pr_debug("RIO: timeout when locking device %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + /* Delay a bit */ + mdelay(1); + tcnt++; + /* Try to acquire device lock again */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + } + + return 0; +} + +/** + * rio_unlock_device - Releases host device lock for specified device + * @port: Master port to send transaction + * @destid: Destination ID for device/switch + * @hopcount: Hopcount to reach switch + * + * Returns 0 if device lock released or EINVAL if fails. + */ +static int +rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) +{ + u32 result; + + /* Release device lock */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + pr_debug("RIO: badness when releasing device lock %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + return 0; } /** @@ -448,6 +630,7 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Port number to be routed + * @lock: lock switch device flag * * Calls the switch specific add_entry() method to add a route entry * on a switch. The route table can be specified using the @table @@ -456,12 +639,26 @@ rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport) * %RIO_GLOBAL_TABLE in @table. Returns %0 on success or %-EINVAL * on failure. */ -static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, - u16 table, u16 route_destid, u8 route_port) +static int +rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch, + u16 table, u16 route_destid, u8 route_port, int lock) { - return rswitch->add_entry(mport, rswitch->destid, + int rc; + + if (lock) { + rc = rio_lock_device(mport, rswitch->destid, + rswitch->hopcount, 1000); + if (rc) + return rc; + } + + rc = rswitch->add_entry(mport, rswitch->destid, rswitch->hopcount, table, route_destid, route_port); + if (lock) + rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + + return rc; } /** @@ -471,6 +668,7 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit * @table: Routing table ID * @route_destid: Destination ID to be routed * @route_port: Pointer to read port number into + * @lock: lock switch device flag * * Calls the switch specific get_entry() method to read a route entry * in a switch. 
The route table can be specified using the @table @@ -481,11 +679,24 @@ static int rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswit */ static int rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table, - u16 route_destid, u8 * route_port) + u16 route_destid, u8 *route_port, int lock) { - return rswitch->get_entry(mport, rswitch->destid, + int rc; + + if (lock) { + rc = rio_lock_device(mport, rswitch->destid, + rswitch->hopcount, 1000); + if (rc) + return rc; + } + + rc = rswitch->get_entry(mport, rswitch->destid, rswitch->hopcount, table, route_destid, route_port); + if (lock) + rio_unlock_device(mport, rswitch->destid, rswitch->hopcount); + + return rc; } /** @@ -625,14 +836,14 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, sw_inport = rio_get_swpinfo_inport(port, RIO_ANY_DESTID(port->sys_size), hopcount); rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, - port->host_deviceid, sw_inport); + port->host_deviceid, sw_inport, 0); rdev->rswitch->route_table[port->host_deviceid] = sw_inport; for (destid = 0; destid < next_destid; destid++) { if (destid == port->host_deviceid) continue; rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, - destid, sw_inport); + destid, sw_inport, 0); rdev->rswitch->route_table[destid] = sw_inport; } @@ -644,8 +855,15 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, rio_name(rdev), rdev->vid, rdev->did, num_ports); sw_destid = next_destid; for (port_num = 0; port_num < num_ports; port_num++) { - if (sw_inport == port_num) + /*Enable Input Output Port (transmitter reviever)*/ + rio_enable_rx_tx_port(port, 0, + RIO_ANY_DESTID(port->sys_size), + hopcount, port_num); + + if (sw_inport == port_num) { + rdev->rswitch->port_ok |= (1 << port_num); continue; + } cur_destid = next_destid; @@ -655,10 +873,11 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, pr_debug( "RIO: scanning device on port %d\n", port_num); + rdev->rswitch->port_ok |= (1 << port_num); rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, RIO_ANY_DESTID(port->sys_size), - port_num); + port_num, 0); if (rio_enum_peer(net, port, hopcount + 1) < 0) return -1; @@ -672,15 +891,35 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, destid, - port_num); + port_num, + 0); rdev->rswitch-> route_table[destid] = port_num; } } + } else { + /* If switch supports Error Management, + * set PORT_LOCKOUT bit for unused port + */ + if (rdev->em_efptr) + rio_set_port_lockout(rdev, port_num, 1); + + rdev->rswitch->port_ok &= ~(1 << port_num); } } + /* Direct Port-write messages to the enumeratiing host */ + if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) && + (rdev->em_efptr)) { + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PW_TGT_DEVID, + (port->host_deviceid << 16) | + (port->sys_size << 15)); + } + + rio_init_em(rdev); + /* Check for empty switch */ if (next_destid == sw_destid) { next_destid++; @@ -700,21 +939,16 @@ static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, * rio_enum_complete- Tests if enumeration of a network is complete * @port: Master port to send transaction * - * Tests the Component Tag CSR for presence of the magic enumeration - * complete flag. Return %1 if enumeration is complete or %0 if + * Tests the Component Tag CSR for non-zero value (enumeration + * complete flag). 
Return %1 if enumeration is complete or %0 if * enumeration is incomplete. */ static int rio_enum_complete(struct rio_mport *port) { u32 tag_csr; - int ret = 0; rio_local_read_config_32(port, RIO_COMPONENT_TAG_CSR, &tag_csr); - - if (tag_csr == RIO_ENUM_CMPL_MAGIC) - ret = 1; - - return ret; + return (tag_csr & 0xffff) ? 1 : 0; } /** @@ -763,17 +997,21 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, pr_debug( "RIO: scanning device on port %d\n", port_num); + + rio_lock_device(port, destid, hopcount, 1000); + for (ndestid = 0; ndestid < RIO_ANY_DESTID(port->sys_size); ndestid++) { rio_route_get_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE, ndestid, - &route_port); + &route_port, 0); if (route_port == port_num) break; } + rio_unlock_device(port, destid, hopcount); if (rio_disc_peer (net, port, ndestid, hopcount + 1) < 0) return -1; @@ -792,7 +1030,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, * * Reads the port error status CSR for the master port to * determine if the port has an active link. Returns - * %PORT_N_ERR_STS_PORT_OK if the master port is active + * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active * or %0 if it is inactive. */ static int rio_mport_is_active(struct rio_mport *port) @@ -813,7 +1051,7 @@ static int rio_mport_is_active(struct rio_mport *port) RIO_PORT_N_ERR_STS_CSR(port->index), &result); - return (result & PORT_N_ERR_STS_PORT_OK); + return result & RIO_PORT_N_ERR_STS_PORT_OK; } /** @@ -866,12 +1104,17 @@ static void rio_update_route_tables(struct rio_mport *port) continue; if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) { + /* Skip if destid ends in empty switch*/ + if (rswitch->destid == destid) + continue; sport = rio_get_swpinfo_inport(port, rswitch->destid, rswitch->hopcount); if (rswitch->add_entry) { - rio_route_add_entry(port, rswitch, RIO_GLOBAL_TABLE, destid, sport); + rio_route_add_entry(port, rswitch, + RIO_GLOBAL_TABLE, destid, + sport, 0); rswitch->route_table[destid] = sport; } } @@ -879,6 +1122,32 @@ static void rio_update_route_tables(struct rio_mport *port) } } +/** + * rio_init_em - Initializes RIO Error Management (for switches) + * @rdev: RIO device + * + * For each enumerated switch, call device-specific error management + * initialization routine (if supplied by the switch driver). + */ +static void rio_init_em(struct rio_dev *rdev) +{ + if (rio_is_switch(rdev) && (rdev->em_efptr) && + (rdev->rswitch->em_init)) { + rdev->rswitch->em_init(rdev); + } +} + +/** + * rio_pw_enable - Enables/disables port-write handling by a master port + * @port: Master port associated with port-write handling + * @enable: 1=enable, 0=disable + */ +static void rio_pw_enable(struct rio_mport *port, int enable) +{ + if (port->ops->pwenable) + port->ops->pwenable(port, enable); +} + /** * rio_enum_mport- Start enumeration through a master port * @mport: Master port to send transactions @@ -911,6 +1180,10 @@ int __devinit rio_enum_mport(struct rio_mport *mport) rc = -ENOMEM; goto out; } + + /* Enable Input Output Port (transmitter reviever) */ + rio_enable_rx_tx_port(mport, 1, 0, 0, 0); + if (rio_enum_peer(net, mport, 0) < 0) { /* A higher priority host won enumeration, bail. 
*/ printk(KERN_INFO @@ -922,6 +1195,7 @@ int __devinit rio_enum_mport(struct rio_mport *mport) } rio_update_route_tables(mport); rio_clear_locks(mport); + rio_pw_enable(mport, 1); } else { printk(KERN_INFO "RIO: master port %d link inactive\n", mport->id); @@ -945,15 +1219,22 @@ static void rio_build_route_tables(void) u8 sport; list_for_each_entry(rdev, &rio_devices, global_list) - if (rio_is_switch(rdev)) - for (i = 0; - i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); - i++) { - if (rio_route_get_entry - (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE, - i, &sport) < 0) - continue; - rdev->rswitch->route_table[i] = sport; + if (rio_is_switch(rdev)) { + rio_lock_device(rdev->net->hport, rdev->rswitch->destid, + rdev->rswitch->hopcount, 1000); + for (i = 0; + i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); + i++) { + if (rio_route_get_entry + (rdev->net->hport, rdev->rswitch, + RIO_GLOBAL_TABLE, i, &sport, 0) < 0) + continue; + rdev->rswitch->route_table[i] = sport; + } + + rio_unlock_device(rdev->net->hport, + rdev->rswitch->destid, + rdev->rswitch->hopcount); } } @@ -1012,6 +1293,13 @@ int __devinit rio_disc_mport(struct rio_mport *mport) del_timer_sync(&rio_enum_timer); pr_debug("done\n"); + + /* Read DestID assigned by enumerator */ + rio_local_read_config_32(mport, RIO_DID_CSR, + &mport->host_deviceid); + mport->host_deviceid = RIO_GET_DID(mport->sys_size, + mport->host_deviceid); + if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size), 0) < 0) { printk(KERN_INFO diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 6395c780008b..08fa453af974 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -5,6 +5,10 @@ * Copyright 2005 MontaVista Software, Inc. * Matt Porter * + * Copyright 2009 Integrated Device Technology, Inc. + * Alex Bounine + * - Added Port-Write/Error Management initialization and handling + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your @@ -332,6 +336,331 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) return rc; } +/** + * rio_request_inb_pwrite - request inbound port-write message service + * @rdev: RIO device to which register inbound port-write callback routine + * @pwcback: Callback routine to execute when port-write is received + * + * Binds a port-write callback function to the RapidIO device. + * Returns 0 if the request has been satisfied. + */ +int rio_request_inb_pwrite(struct rio_dev *rdev, + int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step)) +{ + int rc = 0; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback != NULL) + rc = -ENOMEM; + else + rdev->pwcback = pwcback; + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); + +/** + * rio_release_inb_pwrite - release inbound port-write message service + * @rdev: RIO device which registered for inbound port-write callback + * + * Removes callback from the rio_dev structure. Returns 0 if the request + * has been satisfied. 
+ */ +int rio_release_inb_pwrite(struct rio_dev *rdev) +{ + int rc = -ENOMEM; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback) { + rdev->pwcback = NULL; + rc = 0; + } + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); + +/** + * rio_mport_get_physefb - Helper function that returns register offset + * for Physical Layer Extended Features Block. + * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + */ +u32 +rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount) +{ + u32 ext_ftr_ptr; + u32 ftr_header; + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0); + + while (ext_ftr_ptr) { + if (local) + rio_local_read_config_32(port, ext_ftr_ptr, + &ftr_header); + else + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &ftr_header); + + ftr_header = RIO_GET_BLOCK_ID(ftr_header); + switch (ftr_header) { + + case RIO_EFB_SER_EP_ID_V13P: + case RIO_EFB_SER_EP_REC_ID_V13P: + case RIO_EFB_SER_EP_FREE_ID_V13P: + case RIO_EFB_SER_EP_ID: + case RIO_EFB_SER_EP_REC_ID: + case RIO_EFB_SER_EP_FREE_ID: + case RIO_EFB_SER_EP_FREC_ID: + + return ext_ftr_ptr; + + default: + break; + } + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, + hopcount, ext_ftr_ptr); + } + + return ext_ftr_ptr; +} + +/** + * rio_get_comptag - Begin or continue searching for a RIO device by component tag + * @comp_tag: RIO component tag to match + * @from: Previous RIO device found in search, or %NULL for new search + * + * Iterates through the list of known RIO devices. If a RIO device is + * found with a matching @comp_tag, a pointer to its device + * structure is returned. Otherwise, %NULL is returned. A new search + * is initiated by passing %NULL to the @from argument. Otherwise, if + * @from is not %NULL, searches continue from next device on the global + * list. + */ +static struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) +{ + struct list_head *n; + struct rio_dev *rdev; + + spin_lock(&rio_global_list_lock); + n = from ? from->global_list.next : rio_devices.next; + + while (n && (n != &rio_devices)) { + rdev = rio_dev_g(n); + if (rdev->comp_tag == comp_tag) + goto exit; + n = n->next; + } + rdev = NULL; +exit: + spin_unlock(&rio_global_list_lock); + return rdev; +} + +/** + * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. + * @rdev: Pointer to RIO device control structure + * @pnum: Switch port number to set LOCKOUT bit + * @lock: Operation : set (=1) or clear (=0) + */ +int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) +{ + u8 hopcount = 0xff; + u16 destid = rdev->destid; + u32 regval; + + if (rdev->rswitch) { + destid = rdev->rswitch->destid; + hopcount = rdev->rswitch->hopcount; + } + + rio_mport_read_config_32(rdev->net->hport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + ®val); + if (lock) + regval |= RIO_PORT_N_CTL_LOCKOUT; + else + regval &= ~RIO_PORT_N_CTL_LOCKOUT; + + rio_mport_write_config_32(rdev->net->hport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + regval); + return 0; +} + +/** + * rio_inb_pwrite_handler - process inbound port-write message + * @pw_msg: pointer to inbound port-write message + * + * Processes an inbound port-write message. Returns 0 if the request + * has been satisfied. 
+ */ +int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) +{ + struct rio_dev *rdev; + struct rio_mport *mport; + u8 hopcount; + u16 destid; + u32 err_status; + int rc, portnum; + + rdev = rio_get_comptag(pw_msg->em.comptag, NULL); + if (rdev == NULL) { + /* Someting bad here (probably enumeration error) */ + pr_err("RIO: %s No matching device for CTag 0x%08x\n", + __func__, pw_msg->em.comptag); + return -EIO; + } + + pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + +#ifdef DEBUG_PW + { + u32 i; + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) { + pr_debug("0x%02x: %08x %08x %08x %08x", + i*4, pw_msg->raw[i], pw_msg->raw[i + 1], + pw_msg->raw[i + 2], pw_msg->raw[i + 3]); + i += 4; + } + pr_debug("\n"); + } +#endif + + /* Call an external service function (if such is registered + * for this device). This may be the service for endpoints that send + * device-specific port-write messages. End-point messages expected + * to be handled completely by EP specific device driver. + * For switches rc==0 signals that no standard processing required. + */ + if (rdev->pwcback != NULL) { + rc = rdev->pwcback(rdev, pw_msg, 0); + if (rc == 0) + return 0; + } + + /* For End-point devices processing stops here */ + if (!(rdev->pef & RIO_PEF_SWITCH)) + return 0; + + if (rdev->phys_efptr == 0) { + pr_err("RIO_PW: Bad switch initialization for %s\n", + rio_name(rdev)); + return 0; + } + + mport = rdev->net->hport; + destid = rdev->rswitch->destid; + hopcount = rdev->rswitch->hopcount; + + /* + * Process the port-write notification from switch + */ + + portnum = pw_msg->em.is_port & 0xFF; + + if (rdev->rswitch->em_handle) + rdev->rswitch->em_handle(rdev, portnum); + + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + &err_status); + pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); + + if (pw_msg->em.errdetect) { + pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n", + portnum, pw_msg->em.errdetect); + /* Clear EM Port N Error Detect CSR */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0); + } + + if (pw_msg->em.ltlerrdet) { + pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n", + pw_msg->em.ltlerrdet); + /* Clear EM L/T Layer Error Detect CSR */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0); + } + + /* Clear Port Errors */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + err_status & RIO_PORT_N_ERR_STS_CLR_MASK); + + if (rdev->rswitch->port_ok & (1 << portnum)) { + if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) { + rdev->rswitch->port_ok &= ~(1 << portnum); + rio_set_port_lockout(rdev, portnum, 1); + + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ACK_STS_CSR(portnum), + RIO_PORT_N_ACK_CLEAR); + + /* Schedule Extraction Service */ + pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", + rio_name(rdev), portnum); + } + } else { + if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { + rdev->rswitch->port_ok |= (1 << portnum); + rio_set_port_lockout(rdev, portnum, 0); + + /* Schedule Insertion Service */ + pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n", + rio_name(rdev), portnum); + } + } + + /* Clear Port-Write Pending bit */ + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + RIO_PORT_N_ERR_STS_PW_PEND); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler); + 
+/** + * rio_mport_get_efb - get pointer to next extended features block + * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @from: Offset of current Extended Feature block header (if 0 starts + * from ExtFeaturePtr) + */ +u32 +rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from) +{ + u32 reg_val; + + if (from == 0) { + if (local) + rio_local_read_config_32(port, RIO_ASM_INFO_CAR, + ®_val); + else + rio_mport_read_config_32(port, destid, hopcount, + RIO_ASM_INFO_CAR, ®_val); + return reg_val & RIO_EXT_FTR_PTR_MASK; + } else { + if (local) + rio_local_read_config_32(port, from, ®_val); + else + rio_mport_read_config_32(port, destid, hopcount, + from, ®_val); + return RIO_GET_BLOCK_ID(reg_val); + } +} + /** * rio_mport_get_feature - query for devices' extended features * @port: Master port to issue transaction @@ -451,6 +780,110 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from) return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); } +/** + * rio_std_route_add_entry - Add switch route table entry using standard + * registers defined in RIO specification rev.1.3 + * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + * @route_destid: destID entry in the RT + * @route_port: destination port for specified destID + */ +int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + (u32)route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (u32)route_port); + } + + udelay(10); + return 0; +} + +/** + * rio_std_route_get_entry - Read switch route table entry (port number) + * assosiated with specified destID using standard registers defined in RIO + * specification rev.1.3 + * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + * @route_destid: destID entry in the RT + * @route_port: returned destination port for specified destID + */ +int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + *route_port = (u8)result; + } + + return 0; +} + +/** + * rio_std_route_clr_table - Clear swotch route table using standard registers + * defined in RIO specification rev.1.3. 
+ * @mport: Master port to issue transaction + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @table: routing table ID (global or port-specific) + */ +int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 max_destid = 0xff; + u32 i, pef, id_inc = 1, ext_cfg = 0; + u32 port_sel = RIO_INVALID_ROUTE; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_PEF_CAR, &pef); + + if (mport->sys_size) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWITCH_RT_LIMIT, + &max_destid); + max_destid &= RIO_RT_MAX_DESTID; + } + + if (pef & RIO_PEF_EXT_RT) { + ext_cfg = 0x80000000; + id_inc = 4; + port_sel = (RIO_INVALID_ROUTE << 24) | + (RIO_INVALID_ROUTE << 16) | + (RIO_INVALID_ROUTE << 8) | + RIO_INVALID_ROUTE; + } + + for (i = 0; i <= max_destid;) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, + ext_cfg | i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + port_sel); + i += id_inc; + } + } + + udelay(10); + return 0; +} + static void rio_fixup_device(struct rio_dev *dev) { } diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index 7786d02581f2..f27b7a9c47d2 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -18,38 +18,50 @@ extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid, u8 hopcount, int ftr); +extern u32 rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount); +extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from); extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); extern int rio_enum_mport(struct rio_mport *mport); extern int rio_disc_mport(struct rio_mport *mport); +extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table, u16 route_destid, + u8 route_port); +extern int rio_std_route_get_entry(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table, u16 route_destid, + u8 *route_port); +extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, + u8 hopcount, u16 table); +extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); /* Structures internal to the RIO core code */ extern struct device_attribute rio_dev_attrs[]; extern spinlock_t rio_global_list_lock; -extern struct rio_route_ops __start_rio_route_ops[]; -extern struct rio_route_ops __end_rio_route_ops[]; +extern struct rio_switch_ops __start_rio_switch_ops[]; +extern struct rio_switch_ops __end_rio_switch_ops[]; /* Helpers internal to the RIO core code */ -#define DECLARE_RIO_ROUTE_SECTION(section, vid, did, add_hook, get_hook) \ - static struct rio_route_ops __rio_route_ops __used \ - __section(section)= { vid, did, add_hook, get_hook }; +#define DECLARE_RIO_SWITCH_SECTION(section, name, vid, did, init_hook) \ + static const struct rio_switch_ops __rio_switch_##name __used \ + __section(section) = { vid, did, init_hook }; /** - * DECLARE_RIO_ROUTE_OPS - Registers switch routing operations + * DECLARE_RIO_SWITCH_INIT - Registers switch initialization routine * @vid: RIO vendor ID * @did: RIO device ID - * @add_hook: Callback that adds a route entry - * @get_hook: Callback that gets a route entry + * @init_hook: Callback that performs switch-specific initialization * - * Manipulating switch route tables in RIO is switch specific. 
This - * registers a switch by vendor and device ID with two callbacks for - * modifying and retrieving route entries in a switch. A &struct - * rio_route_ops is initialized with the ops and placed into a - * RIO-specific kernel section. + * Manipulating switch route tables and error management in RIO + * is switch specific. This registers a switch by vendor and device ID with + * initialization callback for setting up switch operations and (if required) + * hardware initialization. A &struct rio_switch_ops is initialized with + * pointer to the init routine and placed into a RIO-specific kernel section. */ -#define DECLARE_RIO_ROUTE_OPS(vid, did, add_hook, get_hook) \ - DECLARE_RIO_ROUTE_SECTION(.rio_route_ops, \ - vid, did, add_hook, get_hook) +#define DECLARE_RIO_SWITCH_INIT(vid, did, init_hook) \ + DECLARE_RIO_SWITCH_SECTION(.rio_switch_ops, vid##did, \ + vid, did, init_hook) #define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16)) #define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16)) diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig new file mode 100644 index 000000000000..2b4e9b2b6631 --- /dev/null +++ b/drivers/rapidio/switches/Kconfig @@ -0,0 +1,28 @@ +# +# RapidIO switches configuration +# +config RAPIDIO_TSI57X + bool "IDT Tsi57x SRIO switches support" + depends on RAPIDIO + ---help--- + Includes support for IDT Tsi57x family of serial RapidIO switches. + +config RAPIDIO_CPS_XX + bool "IDT CPS-xx SRIO switches support" + depends on RAPIDIO + ---help--- + Includes support for IDT CPS-16/12/10/8 serial RapidIO switches. + +config RAPIDIO_TSI568 + bool "Tsi568 SRIO switch support" + depends on RAPIDIO + default n + ---help--- + Includes support for IDT Tsi568 serial RapidIO switch. + +config RAPIDIO_TSI500 + bool "Tsi500 Parallel RapidIO switch support" + depends on RAPIDIO + default n + ---help--- + Includes support for IDT Tsi500 parallel RapidIO switch. diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile index b924f8301761..fe4adc3e8d5f 100644 --- a/drivers/rapidio/switches/Makefile +++ b/drivers/rapidio/switches/Makefile @@ -2,4 +2,11 @@ # Makefile for RIO switches # -obj-$(CONFIG_RAPIDIO) += tsi500.o +obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o +obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o +obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o +obj-$(CONFIG_RAPIDIO_TSI500) += tsi500.o + +ifeq ($(CONFIG_RAPIDIO_DEBUG),y) +EXTRA_CFLAGS += -DDEBUG +endif diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c new file mode 100644 index 000000000000..2c790c144f89 --- /dev/null +++ b/drivers/rapidio/switches/idtcps.c @@ -0,0 +1,137 @@ +/* + * IDT CPS RapidIO switches support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
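The switch drivers below only declare entries in the new .rio_switch_ops section; the core-side code that consumes them is not visible in this excerpt. Judging from the __start_rio_switch_ops/__end_rio_switch_ops symbols and the positional initializer { vid, did, init_hook } in the macro above, the lookup presumably looks roughly like the following sketch (function name and field names are guesses, not taken from the patch):

static void rio_apply_switch_ops(struct rio_dev *rdev, int do_enum)
{
	struct rio_switch_ops *cur = __start_rio_switch_ops;

	while (cur < __end_rio_switch_ops) {
		if (cur->vid == rdev->vid && cur->did == rdev->did) {
			/* let the switch driver fill the rdev->rswitch->* hooks */
			cur->init_hook(rdev, do_enum);
			break;
		}
		cur++;
	}
}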
+ */ + +#include +#include +#include +#include "../rio.h" + +#define CPS_DEFAULT_ROUTE 0xde +#define CPS_NO_ROUTE 0xdf + +#define IDTCPS_RIO_DOMAIN 0xf20020 + +static int +idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + result = (0xffffff00 & result) | (u32)route_port; + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, result); + } + + return 0; +} + +static int +idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); + + rio_mport_read_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, &result); + + if (CPS_DEFAULT_ROUTE == (u8)result || + CPS_NO_ROUTE == (u8)result) + *route_port = RIO_INVALID_ROUTE; + else + *route_port = (u8)result; + } + + return 0; +} + +static int +idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 i; + + if (table == RIO_GLOBAL_TABLE) { + for (i = 0x80000000; i <= 0x800000ff;) { + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_DESTID_SEL_CSR, i); + rio_mport_write_config_32(mport, destid, hopcount, + RIO_STD_RTE_CONF_PORT_SEL_CSR, + (CPS_DEFAULT_ROUTE << 24) | + (CPS_DEFAULT_ROUTE << 16) | + (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE); + i += 4; + } + } + + return 0; +} + +static int +idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + /* + * Switch domain configuration operates only at global level + */ + rio_mport_write_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, (u32)sw_domain); + return 0; +} + +static int +idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + IDTCPS_RIO_DOMAIN, ®val); + + *sw_domain = (u8)(regval & 0xff); + + return 0; +} + +static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = idtcps_route_add_entry; + rdev->rswitch->get_entry = idtcps_route_get_entry; + rdev->rswitch->clr_table = idtcps_route_clr_table; + rdev->rswitch->set_domain = idtcps_set_domain; + rdev->rswitch->get_domain = idtcps_get_domain; + rdev->rswitch->em_init = NULL; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS6Q, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS8, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS10Q, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS12, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS16, idtcps_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDT70K200, idtcps_switch_init); diff --git a/drivers/rapidio/switches/tsi500.c b/drivers/rapidio/switches/tsi500.c index c77c23bd9840..914eddd5aa42 100644 --- a/drivers/rapidio/switches/tsi500.c +++ b/drivers/rapidio/switches/tsi500.c @@ -1,6 +1,10 @@ /* * 
RapidIO Tsi500 switch support * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * - Modified switch operations initialization. + * * Copyright 2005 MontaVista Software, Inc. * Matt Porter * @@ -57,4 +61,18 @@ tsi500_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, u16 tab return ret; } -DECLARE_RIO_ROUTE_OPS(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_route_add_entry, tsi500_route_get_entry); +static int tsi500_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi500_route_add_entry; + rdev->rswitch->get_entry = tsi500_route_get_entry; + rdev->rswitch->clr_table = NULL; + rdev->rswitch->set_domain = NULL; + rdev->rswitch->get_domain = NULL; + rdev->rswitch->em_init = NULL; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI500, tsi500_switch_init); diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c new file mode 100644 index 000000000000..f7fd7898606e --- /dev/null +++ b/drivers/rapidio/switches/tsi568.c @@ -0,0 +1,146 @@ +/* + * RapidIO Tsi568 switch support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI568_SP_MODE_BC 0x10004 +#define TSI568_SP_MODE_PW_DIS 0x08000000 + +static int +tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, &result); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + } + + *route_port = result; + if (*route_port > 15) + ret = -1; + + return ret; +} + +static int +tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), + RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi568_em_init(struct rio_dev *rdev) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 regval; + + pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount); + + /* Make sure that Port-Writes are disabled (for all ports) */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI568_SP_MODE_BC, ®val); + rio_mport_write_config_32(mport, destid, hopcount, + TSI568_SP_MODE_BC, regval | TSI568_SP_MODE_PW_DIS); + + return 0; +} + +static int tsi568_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi568_route_add_entry; + rdev->rswitch->get_entry = tsi568_route_get_entry; + rdev->rswitch->clr_table = tsi568_route_clr_table; + rdev->rswitch->set_domain = NULL; + rdev->rswitch->get_domain = NULL; + rdev->rswitch->em_init = tsi568_em_init; + rdev->rswitch->em_handle = NULL; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI568, tsi568_switch_init); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c new file mode 100644 index 000000000000..d34df722d95f --- /dev/null +++ b/drivers/rapidio/switches/tsi57x.c @@ -0,0 +1,315 @@ +/* + * RapidIO Tsi57x switch family support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
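Nothing in this excerpt shows who drives the new em_init/em_handle hooks; presumably em_init runs once during enumeration and em_handle is invoked from the port-write handler with the reporting port number, as the Tsi57x handler below expects. A minimal dispatch sketch (the function name and how portnum is recovered from the port-write message are assumptions):

static void rio_dispatch_port_write(struct rio_dev *rdev, u8 portnum)
{
	if (rdev->rswitch && rdev->rswitch->em_handle)
		rdev->rswitch->em_handle(rdev, portnum);
	else
		pr_debug("RIO: no error-management handler for %s\n",
			 rio_name(rdev));
}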
+ */ + +#include +#include +#include +#include +#include "../rio.h" + +/* Global (broadcast) route registers */ +#define SPBC_ROUTE_CFG_DESTID 0x10070 +#define SPBC_ROUTE_CFG_PORT 0x10074 + +/* Per port route registers */ +#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n) +#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n) + +#define TSI578_SP_MODE(n) (0x11004 + n*0x100) +#define TSI578_SP_MODE_GLBL 0x10004 +#define TSI578_SP_MODE_PW_DIS 0x08000000 +#define TSI578_SP_MODE_LUT_512 0x01000000 + +#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100) +#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100) +#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100) +#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100) + +#define TSI578_GLBL_ROUTE_BASE 0x10078 + +static int +tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 route_port) +{ + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, route_port); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), route_port); + } + + udelay(10); + + return 0; +} + +static int +tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table, u16 route_destid, u8 *route_port) +{ + int ret = 0; + u32 result; + + if (table == RIO_GLOBAL_TABLE) { + /* Use local RT of the ingress port to avoid possible + race condition */ + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &result); + table = (result & RIO_SWP_INFO_PORT_NUM_MASK); + } + + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), route_destid); + rio_mport_read_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), &result); + + *route_port = (u8)result; + if (*route_port > 15) + ret = -1; + + return ret; +} + +static int +tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table) +{ + u32 route_idx; + u32 lut_size; + + lut_size = (mport->sys_size) ? 
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + + /* Turn off flat (LUT_512) mode */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_MODE_GLBL, ®val); + rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL, + regval & ~TSI578_SP_MODE_LUT_512); + /* Set switch domain base */ + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, + (u32)(sw_domain << 24)); + return 0; +} + +static int +tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain) +{ + u32 regval; + + /* + * Switch domain configuration operates only at global level + */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_GLBL_ROUTE_BASE, ®val); + + *sw_domain = (u8)(regval >> 24); + + return 0; +} + +static int +tsi57x_em_init(struct rio_dev *rdev) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 regval; + int portnum; + + pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount); + + for (portnum = 0; portnum < 16; portnum++) { + /* Make sure that Port-Writes are enabled (for all ports) */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_MODE(portnum), ®val); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_MODE(portnum), + regval & ~TSI578_SP_MODE_PW_DIS); + + /* Clear all pending interrupts */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(portnum), + ®val); + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_ERR_STS_CSR(portnum), + regval & 0x07120214); + + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), ®val); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), + regval & 0x000700bd); + + /* Enable all interrupts to allow ports to send a port-write */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_CTL_INDEP(portnum), ®val); + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_CTL_INDEP(portnum), + regval | 0x000b0000); + + /* Skip next (odd) port if the current port is in x4 mode */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + ®val); + if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4) + portnum++; + } + + return 0; +} + +static int +tsi57x_em_handler(struct rio_dev *rdev, u8 portnum) +{ + struct rio_mport *mport = rdev->net->hport; + u16 destid = rdev->rswitch->destid; + u8 hopcount = rdev->rswitch->hopcount; + u32 intstat, err_status; + int sendcount, checkcount; + u8 route_port; + u32 regval; + + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + &err_status); + + if ((err_status & 
RIO_PORT_N_ERR_STS_PORT_OK) && + (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | + RIO_PORT_N_ERR_STS_PW_INP_ES))) { + /* Remove any queued packets by locking/unlocking port */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + ®val); + if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) { + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval | RIO_PORT_N_CTL_LOCKOUT); + udelay(50); + rio_mport_write_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval); + } + + /* Read from link maintenance response register to clear + * valid bit + */ + rio_mport_read_config_32(mport, destid, hopcount, + rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), + ®val); + + /* Send a Packet-Not-Accepted/Link-Request-Input-Status control + * symbol to recover from IES/OES + */ + sendcount = 3; + while (sendcount) { + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_CS_TX(portnum), 0x40fc8000); + checkcount = 3; + while (checkcount--) { + udelay(50); + rio_mport_read_config_32( + mport, destid, hopcount, + rdev->phys_efptr + + RIO_PORT_N_MNT_RSP_CSR(portnum), + ®val); + if (regval & RIO_PORT_N_MNT_RSP_RVAL) + goto exit_es; + } + + sendcount--; + } + } + +exit_es: + /* Clear implementation specific error status bits */ + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), &intstat); + pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", + destid, hopcount, portnum, intstat); + + if (intstat & 0x10000) { + rio_mport_read_config_32(mport, destid, hopcount, + TSI578_SP_LUT_PEINF(portnum), ®val); + regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); + route_port = rdev->rswitch->route_table[regval]; + pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", + rio_name(rdev), portnum, regval); + tsi57x_route_add_entry(mport, destid, hopcount, + RIO_GLOBAL_TABLE, regval, route_port); + } + + rio_mport_write_config_32(mport, destid, hopcount, + TSI578_SP_INT_STATUS(portnum), + intstat & 0x000700bd); + + return 0; +} + +static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + rdev->rswitch->add_entry = tsi57x_route_add_entry; + rdev->rswitch->get_entry = tsi57x_route_get_entry; + rdev->rswitch->clr_table = tsi57x_route_clr_table; + rdev->rswitch->set_domain = tsi57x_set_domain; + rdev->rswitch->get_domain = tsi57x_get_domain; + rdev->rswitch->em_init = tsi57x_em_init; + rdev->rswitch->em_handle = tsi57x_em_handler; + + return 0; +} + +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI572, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI574, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI577, tsi57x_switch_init); +DECLARE_RIO_SWITCH_INIT(RIO_VID_TUNDRA, RIO_DID_TSI578, tsi57x_switch_init); diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index 1afd008ca957..7b14a67bdca2 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include /* LDO registers and some handy masking definitions for AB3100 */ #define AB3100_LDO_A 0x40 @@ -41,7 +41,7 @@ * struct ab3100_regulator * A struct passed around the individual regulator functions * @platform_device: platform device holding this regulator - * @ab3100: handle to the AB3100 parent chip + * @dev: handle to the device * @plfdata: AB3100 platform data passed in at probe time * 
@regreg: regulator register number in the AB3100 * @fixed_voltage: a fixed voltage for this regulator, if this @@ -52,7 +52,7 @@ */ struct ab3100_regulator { struct regulator_dev *rdev; - struct ab3100 *ab3100; + struct device *dev; struct ab3100_platform_data *plfdata; u8 regreg; int fixed_voltage; @@ -183,7 +183,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) int err; u8 regval; - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { dev_warn(®->dev, "failed to get regid %d value\n", @@ -197,7 +197,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg) regval |= AB3100_REG_ON_MASK; - err = ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); if (err) { dev_warn(®->dev, "failed to set regid %d value\n", @@ -245,14 +245,14 @@ static int ab3100_disable_regulator(struct regulator_dev *reg) if (abreg->regreg == AB3100_LDO_D) { dev_info(®->dev, "disabling LDO D - shut down system\n"); /* Setting LDO D to 0x00 cuts the power to the SoC */ - return ab3100_set_register_interruptible(abreg->ab3100, + return abx500_set_register_interruptible(abreg->dev, 0, AB3100_LDO_D, 0x00U); } /* * All other regulators are handled here */ - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { dev_err(®->dev, "unable to get register 0x%x\n", @@ -260,7 +260,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg) return err; } regval &= ~AB3100_REG_ON_MASK; - return ab3100_set_register_interruptible(abreg->ab3100, abreg->regreg, + return abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); } @@ -270,7 +270,7 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg) u8 regval; int err; - err = ab3100_get_register_interruptible(abreg->ab3100, abreg->regreg, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { dev_err(®->dev, "unable to get register 0x%x\n", @@ -305,7 +305,7 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg) * For variable types, read out setting and index into * supplied voltage list. 
*/ - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { dev_warn(®->dev, @@ -373,7 +373,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg, if (bestindex < 0) return bestindex; - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, abreg->regreg, ®val); if (err) { dev_warn(®->dev, @@ -386,7 +386,7 @@ static int ab3100_set_voltage_regulator(struct regulator_dev *reg, regval &= ~0xE0; regval |= (bestindex << 5); - err = ab3100_set_register_interruptible(abreg->ab3100, + err = abx500_set_register_interruptible(abreg->dev, 0, abreg->regreg, regval); if (err) dev_warn(®->dev, "failed to set regulator register %02x\n", @@ -414,7 +414,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, /* LDO E and BUCK have special suspend voltages you can set */ bestindex = ab3100_get_best_voltage_index(reg, uV, uV); - err = ab3100_get_register_interruptible(abreg->ab3100, + err = abx500_get_register_interruptible(abreg->dev, 0, targetreg, ®val); if (err) { dev_warn(®->dev, @@ -427,7 +427,7 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg, regval &= ~0xE0; regval |= (bestindex << 5); - err = ab3100_set_register_interruptible(abreg->ab3100, + err = abx500_set_register_interruptible(abreg->dev, 0, targetreg, regval); if (err) dev_warn(®->dev, "failed to set regulator register %02x\n", @@ -574,13 +574,12 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { static int __devinit ab3100_regulators_probe(struct platform_device *pdev) { struct ab3100_platform_data *plfdata = pdev->dev.platform_data; - struct ab3100 *ab3100 = platform_get_drvdata(pdev); int err = 0; u8 data; int i; /* Check chip state */ - err = ab3100_get_register_interruptible(ab3100, + err = abx500_get_register_interruptible(&pdev->dev, 0, AB3100_LDO_D, &data); if (err) { dev_err(&pdev->dev, "could not read initial status of LDO_D\n"); @@ -595,7 +594,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) /* Set up regulators */ for (i = 0; i < ARRAY_SIZE(ab3100_reg_init_order); i++) { - err = ab3100_set_register_interruptible(ab3100, + err = abx500_set_register_interruptible(&pdev->dev, 0, ab3100_reg_init_order[i], plfdata->reg_initvals[i]); if (err) { @@ -617,7 +616,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev) * see what it looks like for a certain machine, go * into the machine I2C setup. 
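Each helper above repeats the same abx500 read/modify/write sequence; as a sketch of how that could be factored (the helper itself is not part of the patch, only the two abx500 accessors it calls are):

static int ab3100_reg_update_bits(struct ab3100_regulator *abreg,
				  u8 reg, u8 mask, u8 val)
{
	u8 regval;
	int err;

	err = abx500_get_register_interruptible(abreg->dev, 0, reg, &regval);
	if (err)
		return err;

	regval = (regval & ~mask) | (val & mask);

	return abx500_set_register_interruptible(abreg->dev, 0, reg, regval);
}

Enabling a regulator would then reduce to ab3100_reg_update_bits(abreg, abreg->regreg, AB3100_REG_ON_MASK, AB3100_REG_ON_MASK).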
*/ - reg->ab3100 = ab3100; + reg->dev = &pdev->dev; reg->plfdata = plfdata; /* diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 74841abcc9cc..14b4576281c5 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -22,68 +22,9 @@ #include #include #include -#include #include #include - -/* Register definitions */ -#define TPS6507X_REG_PPATH1 0X01 -#define TPS6507X_REG_INT 0X02 -#define TPS6507X_REG_CHGCONFIG0 0X03 -#define TPS6507X_REG_CHGCONFIG1 0X04 -#define TPS6507X_REG_CHGCONFIG2 0X05 -#define TPS6507X_REG_CHGCONFIG3 0X06 -#define TPS6507X_REG_REG_ADCONFIG 0X07 -#define TPS6507X_REG_TSCMODE 0X08 -#define TPS6507X_REG_ADRESULT_1 0X09 -#define TPS6507X_REG_ADRESULT_2 0X0A -#define TPS6507X_REG_PGOOD 0X0B -#define TPS6507X_REG_PGOODMASK 0X0C -#define TPS6507X_REG_CON_CTRL1 0X0D -#define TPS6507X_REG_CON_CTRL2 0X0E -#define TPS6507X_REG_CON_CTRL3 0X0F -#define TPS6507X_REG_DEFDCDC1 0X10 -#define TPS6507X_REG_DEFDCDC2_LOW 0X11 -#define TPS6507X_REG_DEFDCDC2_HIGH 0X12 -#define TPS6507X_REG_DEFDCDC3_LOW 0X13 -#define TPS6507X_REG_DEFDCDC3_HIGH 0X14 -#define TPS6507X_REG_DEFSLEW 0X15 -#define TPS6507X_REG_LDO_CTRL1 0X16 -#define TPS6507X_REG_DEFLDO2 0X17 -#define TPS6507X_REG_WLED_CTRL1 0X18 -#define TPS6507X_REG_WLED_CTRL2 0X19 - -/* CON_CTRL1 bitfields */ -#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) -#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) -#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) -#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) -#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) - -/* DEFDCDC1 bitfields */ -#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7) -#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F - -/* DEFDCDC2_LOW bitfields */ -#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F - -/* DEFDCDC2_HIGH bitfields */ -#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F - -/* DEFDCDC3_LOW bitfields */ -#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F - -/* DEFDCDC3_HIGH bitfields */ -#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F - -/* TPS6507X_REG_LDO_CTRL1 bitfields */ -#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F - -/* TPS6507X_REG_DEFLDO2 bitfields */ -#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F - -/* VDCDC MASK */ -#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F +#include /* DCDC's */ #define TPS6507X_DCDC_1 0 @@ -162,101 +103,146 @@ struct tps_info { const u16 *table; }; -struct tps_pmic { +static const struct tps_info tps6507x_pmic_regs[] = { + { + .name = "VDCDC1", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "VDCDC2", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "VDCDC3", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), + .table = VDCDCx_VSEL_table, + }, + { + .name = "LDO1", + .min_uV = 1000000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(LDO1_VSEL_table), + .table = LDO1_VSEL_table, + }, + { + .name = "LDO2", + .min_uV = 725000, + .max_uV = 3300000, + .table_len = ARRAY_SIZE(LDO2_VSEL_table), + .table = LDO2_VSEL_table, + }, +}; + +struct tps6507x_pmic { struct regulator_desc desc[TPS6507X_NUM_REGULATOR]; - struct i2c_client *client; + struct tps6507x_dev *mfd; struct regulator_dev *rdev[TPS6507X_NUM_REGULATOR]; const struct tps_info *info[TPS6507X_NUM_REGULATOR]; struct mutex io_lock; }; - -static inline int tps_6507x_read(struct tps_pmic *tps, u8 reg) +static inline int 
tps6507x_pmic_read(struct tps6507x_pmic *tps, u8 reg) { - return i2c_smbus_read_byte_data(tps->client, reg); + u8 val; + int err; + + err = tps->mfd->read_dev(tps->mfd, reg, 1, &val); + + if (err) + return err; + + return val; } -static inline int tps_6507x_write(struct tps_pmic *tps, u8 reg, u8 val) +static inline int tps6507x_pmic_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { - return i2c_smbus_write_byte_data(tps->client, reg, val); + return tps->mfd->write_dev(tps->mfd, reg, 1, &val); } -static int tps_6507x_set_bits(struct tps_pmic *tps, u8 reg, u8 mask) +static int tps6507x_pmic_set_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) { - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data |= mask; - err = tps_6507x_write(tps, reg, data); + err = tps6507x_pmic_write(tps, reg, data); if (err) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } -static int tps_6507x_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask) +static int tps6507x_pmic_clear_bits(struct tps6507x_pmic *tps, u8 reg, u8 mask) { int err, data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) { - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); err = data; goto out; } data &= ~mask; - err = tps_6507x_write(tps, reg, data); + err = tps6507x_pmic_write(tps, reg, data); if (err) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); out: mutex_unlock(&tps->io_lock); return err; } -static int tps_6507x_reg_read(struct tps_pmic *tps, u8 reg) +static int tps6507x_pmic_reg_read(struct tps6507x_pmic *tps, u8 reg) { int data; mutex_lock(&tps->io_lock); - data = tps_6507x_read(tps, reg); + data = tps6507x_pmic_read(tps, reg); if (data < 0) - dev_err(&tps->client->dev, "Read from reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Read from reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return data; } -static int tps_6507x_reg_write(struct tps_pmic *tps, u8 reg, u8 val) +static int tps6507x_pmic_reg_write(struct tps6507x_pmic *tps, u8 reg, u8 val) { int err; mutex_lock(&tps->io_lock); - err = tps_6507x_write(tps, reg, val); + err = tps6507x_pmic_write(tps, reg, val); if (err < 0) - dev_err(&tps->client->dev, "Write for reg 0x%x failed\n", reg); + dev_err(tps->mfd->dev, "Write for reg 0x%x failed\n", reg); mutex_unlock(&tps->io_lock); return err; } -static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) +static int tps6507x_pmic_dcdc_is_enabled(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, dcdc = rdev_get_id(dev); u8 shift; @@ -264,7 +250,7 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) return -EINVAL; shift = TPS6507X_MAX_REG_ID - dcdc; - data = tps_6507x_reg_read(tps, TPS6507X_REG_CON_CTRL1); + data = tps6507x_pmic_reg_read(tps, TPS6507X_REG_CON_CTRL1); if (data < 0) return data; @@ -272,9 +258,9 @@ static int tps6507x_dcdc_is_enabled(struct regulator_dev *dev) return (data & 1<info[dcdc]->table[data] * 1000; } -static int 
tps6507x_dcdc_set_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, dcdc = rdev_get_id(dev); u8 reg; @@ -411,19 +399,19 @@ static int tps6507x_dcdc_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[dcdc]->table_len) return -EINVAL; - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~TPS6507X_DEFDCDCX_DCDC_MASK; data |= vsel; - return tps_6507x_reg_write(tps, reg, data); + return tps6507x_pmic_reg_write(tps, reg, data); } -static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) +static int tps6507x_pmic_ldo_get_voltage(struct regulator_dev *dev) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, ldo = rdev_get_id(dev); u8 reg, mask; @@ -437,7 +425,7 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) TPS6507X_REG_DEFLDO2_LDO2_MASK); } - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; @@ -445,10 +433,10 @@ static int tps6507x_ldo_get_voltage(struct regulator_dev *dev) return tps->info[ldo]->table[data] * 1000; } -static int tps6507x_ldo_set_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev, int min_uV, int max_uV) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int data, vsel, ldo = rdev_get_id(dev); u8 reg, mask; @@ -479,20 +467,20 @@ static int tps6507x_ldo_set_voltage(struct regulator_dev *dev, if (vsel == tps->info[ldo]->table_len) return -EINVAL; - data = tps_6507x_reg_read(tps, reg); + data = tps6507x_pmic_reg_read(tps, reg); if (data < 0) return data; data &= ~mask; data |= vsel; - return tps_6507x_reg_write(tps, reg, data); + return tps6507x_pmic_reg_write(tps, reg, data); } -static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_dcdc_list_voltage(struct regulator_dev *dev, unsigned selector) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int dcdc = rdev_get_id(dev); if (dcdc < TPS6507X_DCDC_1 || dcdc > TPS6507X_DCDC_3) @@ -504,10 +492,10 @@ static int tps6507x_dcdc_list_voltage(struct regulator_dev *dev, return tps->info[dcdc]->table[selector] * 1000; } -static int tps6507x_ldo_list_voltage(struct regulator_dev *dev, +static int tps6507x_pmic_ldo_list_voltage(struct regulator_dev *dev, unsigned selector) { - struct tps_pmic *tps = rdev_get_drvdata(dev); + struct tps6507x_pmic *tps = rdev_get_drvdata(dev); int ldo = rdev_get_id(dev); if (ldo < TPS6507X_LDO_1 || ldo > TPS6507X_LDO_2) @@ -520,47 +508,54 @@ static int tps6507x_ldo_list_voltage(struct regulator_dev *dev, } /* Operations permitted on VDCDCx */ -static struct regulator_ops tps6507x_dcdc_ops = { - .is_enabled = tps6507x_dcdc_is_enabled, - .enable = tps6507x_dcdc_enable, - .disable = tps6507x_dcdc_disable, - .get_voltage = tps6507x_dcdc_get_voltage, - .set_voltage = tps6507x_dcdc_set_voltage, - .list_voltage = tps6507x_dcdc_list_voltage, +static struct regulator_ops tps6507x_pmic_dcdc_ops = { + .is_enabled = tps6507x_pmic_dcdc_is_enabled, + .enable = tps6507x_pmic_dcdc_enable, + .disable = tps6507x_pmic_dcdc_disable, + .get_voltage = tps6507x_pmic_dcdc_get_voltage, + .set_voltage = tps6507x_pmic_dcdc_set_voltage, + 
.list_voltage = tps6507x_pmic_dcdc_list_voltage, }; /* Operations permitted on LDOx */ -static struct regulator_ops tps6507x_ldo_ops = { - .is_enabled = tps6507x_ldo_is_enabled, - .enable = tps6507x_ldo_enable, - .disable = tps6507x_ldo_disable, - .get_voltage = tps6507x_ldo_get_voltage, - .set_voltage = tps6507x_ldo_set_voltage, - .list_voltage = tps6507x_ldo_list_voltage, +static struct regulator_ops tps6507x_pmic_ldo_ops = { + .is_enabled = tps6507x_pmic_ldo_is_enabled, + .enable = tps6507x_pmic_ldo_enable, + .disable = tps6507x_pmic_ldo_disable, + .get_voltage = tps6507x_pmic_ldo_get_voltage, + .set_voltage = tps6507x_pmic_ldo_set_voltage, + .list_voltage = tps6507x_pmic_ldo_list_voltage, }; -static int __devinit tps_6507x_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static __devinit +int tps6507x_pmic_probe(struct platform_device *pdev) { + struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); static int desc_id; - const struct tps_info *info = (void *)id->driver_data; + const struct tps_info *info = &tps6507x_pmic_regs[0]; struct regulator_init_data *init_data; struct regulator_dev *rdev; - struct tps_pmic *tps; + struct tps6507x_pmic *tps; + struct tps6507x_board *tps_board; int i; int error; - if (!i2c_check_functionality(client->adapter, - I2C_FUNC_SMBUS_BYTE_DATA)) - return -EIO; + /** + * tps_board points to pmic related constants + * coming from the board-evm file. + */ + + tps_board = dev_get_platdata(tps6507x_dev->dev); + if (!tps_board) + return -EINVAL; /** * init_data points to array of regulator_init structures * coming from the board-evm file. */ - init_data = client->dev.platform_data; + init_data = tps_board->tps6507x_pmic_init_data; if (!init_data) - return -EIO; + return -EINVAL; tps = kzalloc(sizeof(*tps), GFP_KERNEL); if (!tps) @@ -569,7 +564,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, mutex_init(&tps->io_lock); /* common for all regulators */ - tps->client = client; + tps->mfd = tps6507x_dev; for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) { /* Register the regulators */ @@ -578,15 +573,16 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, tps->desc[i].id = desc_id++; tps->desc[i].n_voltages = num_voltages[i]; tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? 
- &tps6507x_ldo_ops : &tps6507x_dcdc_ops); + &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); tps->desc[i].type = REGULATOR_VOLTAGE; tps->desc[i].owner = THIS_MODULE; rdev = regulator_register(&tps->desc[i], - &client->dev, init_data, tps); + tps6507x_dev->dev, init_data, tps); if (IS_ERR(rdev)) { - dev_err(&client->dev, "failed to register %s\n", - id->name); + dev_err(tps6507x_dev->dev, + "failed to register %s regulator\n", + pdev->name); error = PTR_ERR(rdev); goto fail; } @@ -595,7 +591,7 @@ static int __devinit tps_6507x_probe(struct i2c_client *client, tps->rdev[i] = rdev; } - i2c_set_clientdata(client, tps); + tps6507x_dev->pmic = tps; return 0; @@ -608,19 +604,17 @@ fail: } /** - * tps_6507x_remove - TPS6507x driver i2c remove handler + * tps6507x_remove - TPS6507x driver i2c remove handler * @client: i2c driver client device structure * * Unregister TPS driver as an i2c client device driver */ -static int __devexit tps_6507x_remove(struct i2c_client *client) +static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) { - struct tps_pmic *tps = i2c_get_clientdata(client); + struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); + struct tps6507x_pmic *tps = tps6507x_dev->pmic; int i; - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); - for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); @@ -629,83 +623,38 @@ static int __devexit tps_6507x_remove(struct i2c_client *client) return 0; } -static const struct tps_info tps6507x_regs[] = { - { - .name = "VDCDC1", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = VDCDCx_VSEL_table, - }, - { - .name = "VDCDC2", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = VDCDCx_VSEL_table, - }, - { - .name = "VDCDC3", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(VDCDCx_VSEL_table), - .table = VDCDCx_VSEL_table, - }, - { - .name = "LDO1", - .min_uV = 1000000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(LDO1_VSEL_table), - .table = LDO1_VSEL_table, - }, - { - .name = "LDO2", - .min_uV = 725000, - .max_uV = 3300000, - .table_len = ARRAY_SIZE(LDO2_VSEL_table), - .table = LDO2_VSEL_table, - }, -}; - -static const struct i2c_device_id tps_6507x_id[] = { - {.name = "tps6507x", - .driver_data = (unsigned long) tps6507x_regs,}, - { }, -}; -MODULE_DEVICE_TABLE(i2c, tps_6507x_id); - -static struct i2c_driver tps_6507x_i2c_driver = { +static struct platform_driver tps6507x_pmic_driver = { .driver = { - .name = "tps6507x", + .name = "tps6507x-pmic", .owner = THIS_MODULE, }, - .probe = tps_6507x_probe, - .remove = __devexit_p(tps_6507x_remove), - .id_table = tps_6507x_id, + .probe = tps6507x_pmic_probe, + .remove = __devexit_p(tps6507x_pmic_remove), }; /** - * tps_6507x_init + * tps6507x_pmic_init * * Module init function */ -static int __init tps_6507x_init(void) +static int __init tps6507x_pmic_init(void) { - return i2c_add_driver(&tps_6507x_i2c_driver); + return platform_driver_register(&tps6507x_pmic_driver); } -subsys_initcall(tps_6507x_init); +subsys_initcall(tps6507x_pmic_init); /** - * tps_6507x_cleanup + * tps6507x_pmic_cleanup * * Module exit function */ -static void __exit tps_6507x_cleanup(void) +static void __exit tps6507x_pmic_cleanup(void) { - i2c_del_driver(&tps_6507x_i2c_driver); + platform_driver_unregister(&tps6507x_pmic_driver); } -module_exit(tps_6507x_cleanup); +module_exit(tps6507x_pmic_cleanup); MODULE_AUTHOR("Texas Instruments"); 
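The regulator code above now reaches the chip only through the MFD core's read_dev/write_dev callbacks, which are outside this excerpt. One plausible I2C-backed shape is sketched below, assuming the MFD core keeps its i2c_client in the tps6507x_dev it passes around; the function names and the "client" field are guesses, not taken from the patch:

static int tps6507x_i2c_read_device(struct tps6507x_dev *tps, char reg,
				    int bytes, void *dest)
{
	int ret;

	/* hypothetical: "client" is the MFD core's i2c_client */
	ret = i2c_smbus_read_i2c_block_data(tps->client, reg, bytes, dest);
	if (ret < 0)
		return ret;

	return 0;	/* callers above treat any non-zero return as an error */
}

static int tps6507x_i2c_write_device(struct tps6507x_dev *tps, char reg,
				     int bytes, void *src)
{
	return i2c_smbus_write_i2c_block_data(tps->client, reg, bytes, src);
}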
MODULE_DESCRIPTION("TPS6507x voltage regulator driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:tps6507x-pmic"); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index f1598324344c..10ba12c8c5e0 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -611,6 +611,13 @@ config RTC_DRV_AB3100 Select this to enable the ST-Ericsson AB3100 Mixed Signal IC RTC support. This chip contains a battery- and capacitor-backed RTC. +config RTC_DRV_AB8500 + tristate "ST-Ericsson AB8500 RTC" + depends on AB8500_CORE + help + Select this to enable the ST-Ericsson AB8500 power management IC RTC + support. This chip contains a battery- and capacitor-backed RTC. + config RTC_DRV_NUC900 tristate "NUC910/NUC920 RTC driver" depends on RTC_CLASS && ARCH_W90X900 diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 245311a1348f..5adbba7cf89c 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -18,6 +18,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o # Keep the list ordered. obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o +obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c index 4704aac2b5af..d26780ea254b 100644 --- a/drivers/rtc/rtc-ab3100.c +++ b/drivers/rtc/rtc-ab3100.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include /* Clock rate in Hz */ #define AB3100_RTC_CLOCK_RATE 32768 @@ -45,7 +45,6 @@ */ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_TI0, AB3100_TI1, AB3100_TI2, AB3100_TI3, AB3100_TI4, AB3100_TI5}; unsigned char buf[6]; @@ -61,27 +60,26 @@ static int ab3100_rtc_set_mmss(struct device *dev, unsigned long secs) buf[5] = (fat_time >> 40) & 0xFF; for (i = 0; i < 6; i++) { - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(dev, 0, regs[i], buf[i]); if (err) return err; } /* Set the flag to mark that the clock is now set */ - return ab3100_mask_and_set_register_interruptible(ab3100_data, + return abx500_mask_and_set_register_interruptible(dev, 0, AB3100_RTC, - 0xFE, 0x01); + 0x01, 0x01); } static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u8 rtcval; int err; - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(dev, 0, AB3100_RTC, &rtcval); if (err) return err; @@ -94,7 +92,7 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) u8 buf[6]; /* Read out time registers */ - err = ab3100_get_register_page_interruptible(ab3100_data, + err = abx500_get_register_page_interruptible(dev, 0, AB3100_TI0, buf, 6); if (err != 0) @@ -114,7 +112,6 @@ static int ab3100_rtc_read_time(struct device *dev, struct rtc_time *tm) static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); unsigned long time; u64 fat_time; u8 buf[6]; @@ -122,7 +119,7 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) int err; /* Figure out if alarm is enabled or not */ - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(dev, 0, AB3100_RTC, &rtcval); if (err) return err; @@ -133,7 +130,7 @@ static int ab3100_rtc_read_alarm(struct 
device *dev, struct rtc_wkalrm *alarm) /* No idea how this could be represented */ alarm->pending = 0; /* Read out alarm registers, only 4 bytes */ - err = ab3100_get_register_page_interruptible(ab3100_data, + err = abx500_get_register_page_interruptible(dev, 0, AB3100_AL0, buf, 4); if (err) return err; @@ -148,7 +145,6 @@ static int ab3100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); u8 regs[] = {AB3100_AL0, AB3100_AL1, AB3100_AL2, AB3100_AL3}; unsigned char buf[4]; unsigned long secs; @@ -165,21 +161,19 @@ static int ab3100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) /* Set the alarm */ for (i = 0; i < 4; i++) { - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(dev, 0, regs[i], buf[i]); if (err) return err; } /* Then enable the alarm */ - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), alarm->enabled << 2); } static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) { - struct ab3100 *ab3100_data = dev_get_drvdata(dev); - /* * It's not possible to enable/disable the alarm IRQ for this RTC. * It does not actually trigger any IRQ: instead its only function is @@ -188,12 +182,12 @@ static int ab3100_rtc_irq_enable(struct device *dev, unsigned int enabled) * and need to be handled there instead. */ if (enabled) - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), 1 << 2); else - return ab3100_mask_and_set_register_interruptible(ab3100_data, - AB3100_RTC, ~(1 << 2), + return abx500_mask_and_set_register_interruptible(dev, 0, + AB3100_RTC, (1 << 2), 0); } @@ -210,10 +204,9 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) int err; u8 regval; struct rtc_device *rtc; - struct ab3100 *ab3100_data = platform_get_drvdata(pdev); /* The first RTC register needs special treatment */ - err = ab3100_get_register_interruptible(ab3100_data, + err = abx500_get_register_interruptible(&pdev->dev, 0, AB3100_RTC, ®val); if (err) { dev_err(&pdev->dev, "unable to read RTC register\n"); @@ -231,7 +224,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) * This bit remains until RTC power is lost. */ regval = 1 | RTC_SETTING; - err = ab3100_set_register_interruptible(ab3100_data, + err = abx500_set_register_interruptible(&pdev->dev, 0, AB3100_RTC, regval); /* Ignore any error on this write */ } diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c new file mode 100644 index 000000000000..2fda03125e55 --- /dev/null +++ b/drivers/rtc/rtc-ab8500.c @@ -0,0 +1,363 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License terms: GNU General Public License (GPL) version 2 + * Author: Virupax Sadashivpetimath + * + * RTC clock driver for the RTC part of the AB8500 Power management chip. 
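One detail worth noting in the rtc-ab3100 conversion above: the abx500 mask-and-set helper appears to take the bits to be modified, whereas the old ab3100 helper took the bits to be preserved, which is why 0xFE became 0x01 and ~(1 << 2) became (1 << 2). Updating only the alarm-enable bit therefore reads:

	/* touch bit 2 only, leave the rest of AB3100_RTC untouched */
	abx500_mask_and_set_register_interruptible(dev, 0, AB3100_RTC,
						   1 << 2, enabled ? 1 << 2 : 0);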
+ * Based on RTC clock driver for the AB3100 Analog Baseband Chip by + * Linus Walleij + */ + +#include +#include +#include +#include +#include +#include +#include + +#define AB8500_RTC_SOFF_STAT_REG 0x0F00 +#define AB8500_RTC_CC_CONF_REG 0x0F01 +#define AB8500_RTC_READ_REQ_REG 0x0F02 +#define AB8500_RTC_WATCH_TSECMID_REG 0x0F03 +#define AB8500_RTC_WATCH_TSECHI_REG 0x0F04 +#define AB8500_RTC_WATCH_TMIN_LOW_REG 0x0F05 +#define AB8500_RTC_WATCH_TMIN_MID_REG 0x0F06 +#define AB8500_RTC_WATCH_TMIN_HI_REG 0x0F07 +#define AB8500_RTC_ALRM_MIN_LOW_REG 0x0F08 +#define AB8500_RTC_ALRM_MIN_MID_REG 0x0F09 +#define AB8500_RTC_ALRM_MIN_HI_REG 0x0F0A +#define AB8500_RTC_STAT_REG 0x0F0B +#define AB8500_RTC_BKUP_CHG_REG 0x0F0C +#define AB8500_RTC_FORCE_BKUP_REG 0x0F0D +#define AB8500_RTC_CALIB_REG 0x0F0E +#define AB8500_RTC_SWITCH_STAT_REG 0x0F0F +#define AB8500_REV_REG 0x1080 + +/* RtcReadRequest bits */ +#define RTC_READ_REQUEST 0x01 +#define RTC_WRITE_REQUEST 0x02 + +/* RtcCtrl bits */ +#define RTC_ALARM_ENA 0x04 +#define RTC_STATUS_DATA 0x01 + +#define COUNTS_PER_SEC (0xF000 / 60) +#define AB8500_RTC_EPOCH 2000 + +static const unsigned long ab8500_rtc_time_regs[] = { + AB8500_RTC_WATCH_TMIN_HI_REG, AB8500_RTC_WATCH_TMIN_MID_REG, + AB8500_RTC_WATCH_TMIN_LOW_REG, AB8500_RTC_WATCH_TSECHI_REG, + AB8500_RTC_WATCH_TSECMID_REG +}; + +static const unsigned long ab8500_rtc_alarm_regs[] = { + AB8500_RTC_ALRM_MIN_HI_REG, AB8500_RTC_ALRM_MIN_MID_REG, + AB8500_RTC_ALRM_MIN_LOW_REG +}; + +/* Calculate the seconds from 1970 to 01-01-2000 00:00:00 */ +static unsigned long get_elapsed_seconds(int year) +{ + unsigned long secs; + struct rtc_time tm = { + .tm_year = year - 1900, + .tm_mday = 1, + }; + + /* + * This function calculates secs from 1970 and not from + * 1900, even if we supply the offset from year 1900. 
+ */ + rtc_tm_to_time(&tm, &secs); + return secs; +} + +static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + unsigned long timeout = jiffies + HZ; + int retval, i; + unsigned long mins, secs; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; + + /* Request a data read */ + retval = ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, + RTC_READ_REQUEST); + if (retval < 0) + return retval; + + /* Early AB8500 chips will not clear the rtc read request bit */ + if (ab8500->revision == 0) { + msleep(1); + } else { + /* Wait for some cycles after enabling the rtc read in ab8500 */ + while (time_before(jiffies, timeout)) { + retval = ab8500_read(ab8500, AB8500_RTC_READ_REQ_REG); + if (retval < 0) + return retval; + + if (!(retval & RTC_READ_REQUEST)) + break; + + msleep(1); + } + } + + /* Read the Watchtime registers */ + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { + retval = ab8500_read(ab8500, ab8500_rtc_time_regs[i]); + if (retval < 0) + return retval; + buf[i] = retval; + } + + mins = (buf[0] << 16) | (buf[1] << 8) | buf[2]; + + secs = (buf[3] << 8) | buf[4]; + secs = secs / COUNTS_PER_SEC; + secs = secs + (mins * 60); + + /* Add back the initially subtracted number of seconds */ + secs += get_elapsed_seconds(AB8500_RTC_EPOCH); + + rtc_time_to_tm(secs, tm); + return rtc_valid_tm(tm); +} + +static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; + unsigned long no_secs, no_mins, secs = 0; + + if (tm->tm_year < (AB8500_RTC_EPOCH - 1900)) { + dev_dbg(dev, "year should be equal to or greater than %d\n", + AB8500_RTC_EPOCH); + return -EINVAL; + } + + /* Get the number of seconds since 1970 */ + rtc_tm_to_time(tm, &secs); + + /* + * Convert it to the number of seconds since 01-01-2000 00:00:00, since + * we only have a small counter in the RTC. 
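For orientation, COUNTS_PER_SEC works out to 0xF000 / 60 = 61440 / 60 = 1024, i.e. the sub-minute counter ticks 1024 times per second (consistent with a 32768 Hz RTC source divided by 32, though the divider itself is not shown here), and exactly one minute fills the 16-bit TSECHI/TSECMID pair up to 0xF000. A quick numeric check of the decode in ab8500_rtc_read_time() above:

	/* e.g. TSECHI:TSECMID == 0x3C00 -> 0x3C00 / 1024 = 15 seconds,
	 * and mins == 0x000002 -> 2 * 60 = 120 s, i.e. 135 s past the
	 * 01-01-2000 epoch before get_elapsed_seconds() is added back */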
+ */ + secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); + + no_mins = secs / 60; + + no_secs = secs % 60; + /* Make the seconds count as per the RTC resolution */ + no_secs = no_secs * COUNTS_PER_SEC; + + buf[4] = no_secs & 0xFF; + buf[3] = (no_secs >> 8) & 0xFF; + + buf[2] = no_mins & 0xFF; + buf[1] = (no_mins >> 8) & 0xFF; + buf[0] = (no_mins >> 16) & 0xFF; + + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_time_regs); i++) { + retval = ab8500_write(ab8500, ab8500_rtc_time_regs[i], buf[i]); + if (retval < 0) + return retval; + } + + /* Request a data write */ + return ab8500_write(ab8500, AB8500_RTC_READ_REQ_REG, RTC_WRITE_REQUEST); +} + +static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + int rtc_ctrl; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; + unsigned long secs, mins; + + /* Check if the alarm is enabled or not */ + rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG); + if (rtc_ctrl < 0) + return rtc_ctrl; + + if (rtc_ctrl & RTC_ALARM_ENA) + alarm->enabled = 1; + else + alarm->enabled = 0; + + alarm->pending = 0; + + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { + retval = ab8500_read(ab8500, ab8500_rtc_alarm_regs[i]); + if (retval < 0) + return retval; + buf[i] = retval; + } + + mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]); + secs = mins * 60; + + /* Add back the initially subtracted number of seconds */ + secs += get_elapsed_seconds(AB8500_RTC_EPOCH); + + rtc_time_to_tm(secs, &alarm->time); + + return rtc_valid_tm(&alarm->time); +} + +static int ab8500_rtc_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + + return ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_ALARM_ENA, + enabled ? RTC_ALARM_ENA : 0); +} + +static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); + int retval, i; + unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)]; + unsigned long mins, secs = 0; + + if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) { + dev_dbg(dev, "year should be equal to or greater than %d\n", + AB8500_RTC_EPOCH); + return -EINVAL; + } + + /* Get the number of seconds since 1970 */ + rtc_tm_to_time(&alarm->time, &secs); + + /* + * Convert it to the number of seconds since 01-01-2000 00:00:00, since + * we only have a small counter in the RTC. 
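Note that the alarm registers cover minutes only (three bytes, no sub-minute counter), so the seconds component of the requested alarm time is discarded by the integer division below:

	/* e.g. an alarm requested for 12:30:45 is programmed as minute 12:30;
	 * mins = secs / 60 simply drops the remaining 45 seconds */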
+ */ + secs -= get_elapsed_seconds(AB8500_RTC_EPOCH); + + mins = secs / 60; + + buf[2] = mins & 0xFF; + buf[1] = (mins >> 8) & 0xFF; + buf[0] = (mins >> 16) & 0xFF; + + /* Set the alarm time */ + for (i = 0; i < ARRAY_SIZE(ab8500_rtc_alarm_regs); i++) { + retval = ab8500_write(ab8500, ab8500_rtc_alarm_regs[i], buf[i]); + if (retval < 0) + return retval; + } + + return ab8500_rtc_irq_enable(dev, alarm->enabled); +} + +static irqreturn_t rtc_alarm_handler(int irq, void *data) +{ + struct rtc_device *rtc = data; + unsigned long events = RTC_IRQF | RTC_AF; + + dev_dbg(&rtc->dev, "%s\n", __func__); + rtc_update_irq(rtc, 1, events); + + return IRQ_HANDLED; +} + +static const struct rtc_class_ops ab8500_rtc_ops = { + .read_time = ab8500_rtc_read_time, + .set_time = ab8500_rtc_set_time, + .read_alarm = ab8500_rtc_read_alarm, + .set_alarm = ab8500_rtc_set_alarm, + .alarm_irq_enable = ab8500_rtc_irq_enable, +}; + +static int __devinit ab8500_rtc_probe(struct platform_device *pdev) +{ + struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); + int err; + struct rtc_device *rtc; + int rtc_ctrl; + int irq; + + irq = platform_get_irq_byname(pdev, "ALARM"); + if (irq < 0) + return irq; + + /* For RTC supply test */ + err = ab8500_set_bits(ab8500, AB8500_RTC_STAT_REG, RTC_STATUS_DATA, + RTC_STATUS_DATA); + if (err < 0) + return err; + + /* Wait for reset by the PorRtc */ + msleep(1); + + rtc_ctrl = ab8500_read(ab8500, AB8500_RTC_STAT_REG); + if (rtc_ctrl < 0) + return rtc_ctrl; + + /* Check if the RTC Supply fails */ + if (!(rtc_ctrl & RTC_STATUS_DATA)) { + dev_err(&pdev->dev, "RTC supply failure\n"); + return -ENODEV; + } + + rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + dev_err(&pdev->dev, "Registration failed\n"); + err = PTR_ERR(rtc); + return err; + } + + err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0, + "ab8500-rtc", rtc); + if (err < 0) { + rtc_device_unregister(rtc); + return err; + } + + platform_set_drvdata(pdev, rtc); + + return 0; +} + +static int __devexit ab8500_rtc_remove(struct platform_device *pdev) +{ + struct rtc_device *rtc = platform_get_drvdata(pdev); + int irq = platform_get_irq_byname(pdev, "ALARM"); + + free_irq(irq, rtc); + rtc_device_unregister(rtc); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver ab8500_rtc_driver = { + .driver = { + .name = "ab8500-rtc", + .owner = THIS_MODULE, + }, + .probe = ab8500_rtc_probe, + .remove = __devexit_p(ab8500_rtc_remove), +}; + +static int __init ab8500_rtc_init(void) +{ + return platform_driver_register(&ab8500_rtc_driver); +} + +static void __exit ab8500_rtc_exit(void) +{ + platform_driver_unregister(&ab8500_rtc_driver); +} + +module_init(ab8500_rtc_init); +module_exit(ab8500_rtc_exit); +MODULE_AUTHOR("Virupax Sadashivpetimath "); +MODULE_DESCRIPTION("AB8500 RTC Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 038095d99976..6dc4e6241418 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -595,10 +595,6 @@ static void wdt_disable(void) static ssize_t wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - /* Can't seek (pwrite) on this device - if (ppos != &file->f_pos) - return -ESPIPE; - */ if (count) { wdt_ping(); return 1; @@ -707,7 +703,7 @@ static int wdt_open(struct inode *inode, struct file *file) */ wdt_is_open = 1; unlock_kernel(); - return 0; + return nonseekable_open(inode, file); } return -ENODEV; } diff --git 
a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 0e86247d791e..33975e922d65 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1186,6 +1186,29 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, dasd_schedule_device_bh(device); } +enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) +{ + struct dasd_device *device; + + device = dasd_device_from_cdev_locked(cdev); + + if (IS_ERR(device)) + goto out; + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || + device->state != device->target || + !device->discipline->handle_unsolicited_interrupt){ + dasd_put_device(device); + goto out; + } + + dasd_device_clear_timer(device); + device->discipline->handle_unsolicited_interrupt(device, irb); + dasd_put_device(device); +out: + return UC_TODO_RETRY; +} +EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); + /* * If we have an error on a dasd_block layer request then we cancel * and return all further requests from the same dasd_block as well. diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 5b1cd8d6e971..ab84da5592e8 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -3436,6 +3436,7 @@ static struct ccw_driver dasd_eckd_driver = { .freeze = dasd_generic_pm_freeze, .thaw = dasd_generic_restore_device, .restore = dasd_generic_restore_device, + .uc_handler = dasd_generic_uc_handler, }; /* diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 32fac186ba3f..49b431d135e0 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -617,6 +617,7 @@ int dasd_generic_notify(struct ccw_device *, int); void dasd_generic_handle_state_change(struct dasd_device *); int dasd_generic_pm_freeze(struct ccw_device *); int dasd_generic_restore_device(struct ccw_device *); +enum uc_todo dasd_generic_uc_handler(struct ccw_device *, struct irb *); int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); char *dasd_get_sense(struct irb *); diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 5f97ea2ee6b1..97b25d68e3e7 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -123,8 +123,10 @@ ccwgroup_release (struct device *dev) for (i = 0; i < gdev->count; i++) { if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); } } @@ -262,11 +264,14 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, goto error; } /* Don't allow a device to belong to more than one group. */ + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev)) { + spin_unlock_irq(gdev->cdev[i]->ccwlock); rc = -EINVAL; goto error; } dev_set_drvdata(&gdev->cdev[i]->dev, gdev); + spin_unlock_irq(gdev->cdev[i]->ccwlock); } /* Check for sufficient number of bus ids. 
*/ if (i < num_devices && !curr_buf) { @@ -303,8 +308,10 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, error: for (i = 0; i < num_devices; i++) if (gdev->cdev[i]) { + spin_lock_irq(gdev->cdev[i]->ccwlock); if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) dev_set_drvdata(&gdev->cdev[i]->dev, NULL); + spin_unlock_irq(gdev->cdev[i]->ccwlock); put_device(&gdev->cdev[i]->dev); gdev->cdev[i] = NULL; } diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c index 37df42af05ec..7f206ed44fdf 100644 --- a/drivers/s390/cio/ccwreq.c +++ b/drivers/s390/cio/ccwreq.c @@ -159,6 +159,7 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) { struct irb *irb = &cdev->private->irb; struct cmd_scsw *scsw = &irb->scsw.cmd; + enum uc_todo todo; /* Perform BASIC SENSE if needed. */ if (ccw_device_accumulate_and_sense(cdev, lcirb)) @@ -178,6 +179,20 @@ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb) /* Check for command reject. */ if (irb->ecw[0] & SNS0_CMD_REJECT) return IO_REJECTED; + /* Ask the driver what to do */ + if (cdev->drv && cdev->drv->uc_handler) { + todo = cdev->drv->uc_handler(cdev, lcirb); + switch (todo) { + case UC_TODO_RETRY: + return IO_STATUS_ERROR; + case UC_TODO_RETRY_ON_NEW_PATH: + return IO_PATH_ERROR; + case UC_TODO_STOP: + return IO_REJECTED; + default: + return IO_STATUS_ERROR; + } + } /* Assume that unexpected SENSE data implies an error. */ return IO_STATUS_ERROR; } diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 759262792633..fac06155773f 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -23,21 +23,6 @@ struct tpi_info { * Some S390 specific IO instructions as inline */ -static inline int stsch(struct subchannel_id schid, struct schib *addr) -{ - register struct subchannel_id reg1 asm ("1") = schid; - int ccode; - - asm volatile( - " stsch 0(%3)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode), "=m" (*addr) - : "d" (reg1), "a" (addr) - : "cc"); - return ccode; -} - static inline int stsch_err(struct subchannel_id schid, struct schib *addr) { register struct subchannel_id reg1 asm ("1") = schid; diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 308541ff85cf..1bb5d3f0e260 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -1,34 +1,31 @@ #include -#include -#include -#include #include #include +#include +#include +#include +#include -#include #include #include #include #include -#include -#include -#include #include "scsi.h" -#include #include "wd33c93.h" #include "a2091.h" -#include - -static int a2091_release(struct Scsi_Host *instance); +struct a2091_hostdata { + struct WD33C93_hostdata wh; + struct a2091_scsiregs *regs; +}; static irqreturn_t a2091_intr(int irq, void *data) { struct Scsi_Host *instance = data; - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); - unsigned int status = regs->ISTR; + struct a2091_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; unsigned long flags; if (!(status & (ISTR_INT_F | ISTR_INT_P)) || !(status & ISTR_INTS)) @@ -43,38 +40,39 @@ static irqreturn_t a2091_intr(int irq, void *data) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; - struct WD33C93_hostdata *hdata = shost_priv(instance); - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = 
hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); /* don't allow DMA if the physical address is bad */ if (addr & A2091_XFER_MASK) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; - hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, - GFP_KERNEL); + wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); /* can't allocate memory; use PIO */ - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } /* get the physical address of the bounce buffer */ - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); /* the bounce buffer may not be in the first 16M of physmem */ if (addr & A2091_XFER_MASK) { /* we could use chipmem... maybe later */ - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } @@ -84,7 +82,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cntr |= CNTR_DDIR; /* remember direction */ - hdata->dma_dir = dir_in; + wh->dma_dir = dir_in; regs->CNTR = cntr; @@ -108,20 +106,21 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); + struct a2091_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a2091_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; - if (!hdata->dma_dir) + if (!wh->dma_dir) cntr |= CNTR_DDIR; /* disable SCSI interrupts */ regs->CNTR = cntr; /* flush if we were reading */ - if (hdata->dma_dir) { + if (wh->dma_dir) { regs->FLUSH = 1; while (!(regs->ISTR & ISTR_FE_FLG)) ; @@ -137,95 +136,37 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, regs->CNTR = CNTR_PDMD | CNTR_INTEN; /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { - if (hdata->dma_dir) - memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, + if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; - } -} - -static int __init a2091_detect(struct scsi_host_template *tpnt) -{ - static unsigned char called = 0; - struct Scsi_Host *instance; - unsigned long address; - struct zorro_dev *z = NULL; - wd33c93_regs wdregs; - a2091_scsiregs *regs; - struct WD33C93_hostdata *hdata; - int num_a2091 = 0; - - if (!MACH_IS_AMIGA || called) - return 0; - called = 1; - - tpnt->proc_name = "A2091"; - tpnt->proc_info = &wd33c93_proc_info; - - while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { - if (z->id != ZORRO_PROD_CBM_A590_A2091_1 && - z->id != ZORRO_PROD_CBM_A590_A2091_2) - continue; - address = z->resource.start; - if (!request_mem_region(address, 256, "wd33c93")) - continue; - - instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (instance == NULL) - goto release; - instance->base = 
ZTWO_VADDR(address); - instance->irq = IRQ_AMIGA_PORTS; - instance->unique_id = z->slotaddr; - regs = (a2091_scsiregs *)(instance->base); - regs->DAWR = DAWR_A2091; - wdregs.SASR = &regs->SASR; - wdregs.SCMD = &regs->SCMD; - hdata = shost_priv(instance); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(instance, wdregs, dma_setup, dma_stop, - WD33C93_FS_8_10); - if (request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, - "A2091 SCSI", instance)) - goto unregister; - regs->CNTR = CNTR_PDMD | CNTR_INTEN; - num_a2091++; - continue; - -unregister: - scsi_unregister(instance); -release: - release_mem_region(address, 256); + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } - - return num_a2091; } static int a2091_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform bus-specific reset */ /* FIXME 2: kill this function, and let midlayer fall back to the same action, calling wd33c93_host_reset() */ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } -#define HOSTS_C - -static struct scsi_host_template driver_template = { - .proc_name = "A2901", +static struct scsi_host_template a2091_scsi_template = { + .module = THIS_MODULE, .name = "Commodore A2091/A590 SCSI", - .detect = a2091_detect, - .release = a2091_release, + .proc_info = wd33c93_proc_info, + .proc_name = "A2901", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a2091_bus_reset, @@ -237,19 +178,103 @@ static struct scsi_host_template driver_template = { .use_clustering = DISABLE_CLUSTERING }; +static int __devinit a2091_probe(struct zorro_dev *z, + const struct zorro_device_id *ent) +{ + struct Scsi_Host *instance; + int error; + struct a2091_scsiregs *regs; + wd33c93_regs wdregs; + struct a2091_hostdata *hdata; -#include "scsi_module.c" + if (!request_mem_region(z->resource.start, 256, "wd33c93")) + return -EBUSY; -static int a2091_release(struct Scsi_Host *instance) + instance = scsi_host_alloc(&a2091_scsi_template, + sizeof(struct a2091_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); + regs->DAWR = DAWR_A2091; + + wdregs.SASR = &regs->SASR; + wdregs.SCMD = &regs->SCMD; + + hdata = shost_priv(instance); + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10); + error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED, + "A2091 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(z->resource.start, 256); + return error; +} + +static void __devexit a2091_remove(struct zorro_dev *z) { -#ifdef MODULE - a2091_scsiregs *regs = (a2091_scsiregs *)(instance->base); - regs->CNTR = 0; -
release_mem_region(ZTWO_PADDR(instance->base), 256); + hdata->regs->CNTR = 0; + scsi_remove_host(instance); free_irq(IRQ_AMIGA_PORTS, instance); -#endif - return 1; + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + +static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_CBM_A590_A2091_1 }, + { ZORRO_PROD_CBM_A590_A2091_2 }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, a2091_zorro_tbl); + +static struct zorro_driver a2091_driver = { + .name = "a2091", + .id_table = a2091_zorro_tbl, + .probe = a2091_probe, + .remove = __devexit_p(a2091_remove), +}; + +static int __init a2091_init(void) +{ + return zorro_register_driver(&a2091_driver); +} +module_init(a2091_init); + +static void __exit a2091_exit(void) +{ + zorro_unregister_driver(&a2091_driver); } +module_exit(a2091_exit); +MODULE_DESCRIPTION("Commodore A2091/A590 SCSI"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/a2091.h b/drivers/scsi/a2091.h index 1c3daa1fd754..794b8e65c711 100644 --- a/drivers/scsi/a2091.h +++ b/drivers/scsi/a2091.h @@ -25,7 +25,7 @@ */ #define A2091_XFER_MASK (0xff000001) -typedef struct { +struct a2091_scsiregs { unsigned char pad1[64]; volatile unsigned short ISTR; volatile unsigned short CNTR; @@ -44,7 +44,7 @@ typedef struct { volatile unsigned short CINT; unsigned char pad7[2]; volatile unsigned short FLUSH; -} a2091_scsiregs; +}; #define DAWR_A2091 (3) diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index bc6eb69f5fd0..d9468027fb61 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -1,53 +1,52 @@ #include #include -#include -#include #include #include +#include #include #include +#include -#include #include #include #include #include -#include #include "scsi.h" -#include #include "wd33c93.h" #include "a3000.h" -#include - -#define DMA(ptr) ((a3000_scsiregs *)((ptr)->base)) - -static struct Scsi_Host *a3000_host = NULL; - -static int a3000_release(struct Scsi_Host *instance); +struct a3000_hostdata { + struct WD33C93_hostdata wh; + struct a3000_scsiregs *regs; +}; -static irqreturn_t a3000_intr(int irq, void *dummy) +static irqreturn_t a3000_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + struct a3000_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->ISTR; unsigned long flags; - unsigned int status = DMA(a3000_host)->ISTR; if (!(status & ISTR_INT_P)) return IRQ_NONE; if (status & ISTR_INTS) { - spin_lock_irqsave(a3000_host->host_lock, flags); - wd33c93_intr(a3000_host); - spin_unlock_irqrestore(a3000_host->host_lock, flags); + spin_lock_irqsave(instance->host_lock, flags); + wd33c93_intr(instance); + spin_unlock_irqrestore(instance->host_lock, flags); return IRQ_HANDLED; } - printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status); + pr_warning("Non-serviced A3000 SCSI-interrupt? 
ISTR = %02x\n", status); return IRQ_NONE; } static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { - struct WD33C93_hostdata *hdata = shost_priv(a3000_host); + struct Scsi_Host *instance = cmd->device->host; + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; unsigned short cntr = CNTR_PDMD | CNTR_INTEN; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -58,23 +57,23 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) * buffer */ if (addr & A3000_XFER_MASK) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; - hdata->dma_bounce_buffer = kmalloc(hdata->dma_bounce_len, - GFP_KERNEL); + wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len, + GFP_KERNEL); /* can't allocate memory; use PIO */ - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); } /* setup dma direction */ @@ -82,12 +81,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cntr |= CNTR_DDIR; /* remember direction */ - hdata->dma_dir = dir_in; + wh->dma_dir = dir_in; - DMA(a3000_host)->CNTR = cntr; + regs->CNTR = cntr; /* setup DMA *physical* address */ - DMA(a3000_host)->ACR = addr; + regs->ACR = addr; if (dir_in) { /* invalidate any cache */ @@ -99,7 +98,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) /* start DMA */ mb(); /* make sure setup is completed */ - DMA(a3000_host)->ST_DMA = 1; + regs->ST_DMA = 1; mb(); /* make sure DMA has started before next IO */ /* return success */ @@ -109,22 +108,24 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct a3000_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct a3000_scsiregs *regs = hdata->regs; /* disable SCSI interrupts */ unsigned short cntr = CNTR_PDMD; - if (!hdata->dma_dir) + if (!wh->dma_dir) cntr |= CNTR_DDIR; - DMA(instance)->CNTR = cntr; + regs->CNTR = cntr; mb(); /* make sure CNTR is updated before next IO */ /* flush if we were reading */ - if (hdata->dma_dir) { - DMA(instance)->FLUSH = 1; + if (wh->dma_dir) { + regs->FLUSH = 1; mb(); /* don't allow prefetch */ - while (!(DMA(instance)->ISTR & ISTR_FE_FLG)) + while (!(regs->ISTR & ISTR_FE_FLG)) barrier(); mb(); /* no IO until FLUSH is done */ } @@ -133,96 +134,54 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, /* I think that this CINT is only necessary if you are * using the terminal count features. 
HM 7 Mar 1994 */ - DMA(instance)->CINT = 1; + regs->CINT = 1; /* stop DMA */ - DMA(instance)->SP_DMA = 1; + regs->SP_DMA = 1; mb(); /* make sure DMA is stopped before next IO */ /* restore the CONTROL bits (minus the direction flag) */ - DMA(instance)->CNTR = CNTR_PDMD | CNTR_INTEN; + regs->CNTR = CNTR_PDMD | CNTR_INTEN; mb(); /* make sure CNTR is updated before next IO */ /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { + if (status && wh->dma_bounce_buffer) { if (SCpnt) { - if (hdata->dma_dir && SCpnt) - memcpy(SCpnt->SCp.ptr, - hdata->dma_bounce_buffer, + if (wh->dma_dir && SCpnt) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } else { - kfree(hdata->dma_bounce_buffer); - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; + kfree(wh->dma_bounce_buffer); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } } } -static int __init a3000_detect(struct scsi_host_template *tpnt) -{ - wd33c93_regs regs; - struct WD33C93_hostdata *hdata; - - if (!MACH_IS_AMIGA || !AMIGAHW_PRESENT(A3000_SCSI)) - return 0; - if (!request_mem_region(0xDD0000, 256, "wd33c93")) - return 0; - - tpnt->proc_name = "A3000"; - tpnt->proc_info = &wd33c93_proc_info; - - a3000_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (a3000_host == NULL) - goto fail_register; - - a3000_host->base = ZTWO_VADDR(0xDD0000); - a3000_host->irq = IRQ_AMIGA_PORTS; - DMA(a3000_host)->DAWR = DAWR_A3000; - regs.SASR = &(DMA(a3000_host)->SASR); - regs.SCMD = &(DMA(a3000_host)->SCMD); - hdata = shost_priv(a3000_host); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(a3000_host, regs, dma_setup, dma_stop, WD33C93_FS_12_15); - if (request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, "A3000 SCSI", - a3000_intr)) - goto fail_irq; - DMA(a3000_host)->CNTR = CNTR_PDMD | CNTR_INTEN; - - return 1; - -fail_irq: - scsi_unregister(a3000_host); -fail_register: - release_mem_region(0xDD0000, 256); - return 0; -} - static int a3000_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform bus-specific reset */ /* FIXME 2: kill this entire function, which should cause mid-layer to call wd33c93_host_reset anyway? 
*/ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } -#define HOSTS_C - -static struct scsi_host_template driver_template = { - .proc_name = "A3000", +static struct scsi_host_template amiga_a3000_scsi_template = { + .module = THIS_MODULE, .name = "Amiga 3000 built-in SCSI", - .detect = a3000_detect, - .release = a3000_release, + .proc_info = wd33c93_proc_info, + .proc_name = "A3000", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = a3000_bus_reset, @@ -234,15 +193,104 @@ static struct scsi_host_template driver_template = { .use_clustering = ENABLE_CLUSTERING }; +static int __init amiga_a3000_scsi_probe(struct platform_device *pdev) +{ + struct resource *res; + struct Scsi_Host *instance; + int error; + struct a3000_scsiregs *regs; + wd33c93_regs wdregs; + struct a3000_hostdata *hdata; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (!request_mem_region(res->start, resource_size(res), "wd33c93")) + return -EBUSY; + + instance = scsi_host_alloc(&amiga_a3000_scsi_template, + sizeof(struct a3000_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; -#include "scsi_module.c" + regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); + regs->DAWR = DAWR_A3000; + + wdregs.SASR = &regs->SASR; + wdregs.SCMD = &regs->SCMD; + + hdata = shost_priv(instance); + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15); + error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED, + "A3000 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = CNTR_PDMD | CNTR_INTEN; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + platform_set_drvdata(pdev, instance); + + scsi_scan_host(instance); + return 0; + +fail_host: + free_irq(IRQ_AMIGA_PORTS, instance); +fail_irq: + scsi_host_put(instance); +fail_alloc: + release_mem_region(res->start, resource_size(res)); + return error; +} + +static int __exit amiga_a3000_scsi_remove(struct platform_device *pdev) +{ + struct Scsi_Host *instance = platform_get_drvdata(pdev); + struct a3000_hostdata *hdata = shost_priv(instance); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(res->start, resource_size(res)); + return 0; +} + +static struct platform_driver amiga_a3000_scsi_driver = { + .remove = __exit_p(amiga_a3000_scsi_remove), + .driver = { + .name = "amiga-a3000-scsi", + .owner = THIS_MODULE, + }, +}; + +static int __init amiga_a3000_scsi_init(void) +{ + return platform_driver_probe(&amiga_a3000_scsi_driver, + amiga_a3000_scsi_probe); +} +module_init(amiga_a3000_scsi_init); -static int a3000_release(struct Scsi_Host *instance) +static void __exit amiga_a3000_scsi_exit(void) { - DMA(instance)->CNTR = 0; - release_mem_region(0xDD0000, 256); - free_irq(IRQ_AMIGA_PORTS, a3000_intr); - return 1; + platform_driver_unregister(&amiga_a3000_scsi_driver); } +module_exit(amiga_a3000_scsi_exit); +MODULE_DESCRIPTION("Amiga 3000 built-in SCSI"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a3000-scsi"); diff --git a/drivers/scsi/a3000.h
b/drivers/scsi/a3000.h index 684813ee378c..49db4a335aab 100644 --- a/drivers/scsi/a3000.h +++ b/drivers/scsi/a3000.h @@ -25,7 +25,7 @@ */ #define A3000_XFER_MASK (0x00000003) -typedef struct { +struct a3000_scsiregs { unsigned char pad1[2]; volatile unsigned short DAWR; volatile unsigned int WTC; @@ -46,7 +46,7 @@ typedef struct { volatile unsigned char SASR; unsigned char pad9; volatile unsigned char SCMD; -} a3000_scsiregs; +}; #define DAWR_A3000 (3) diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index 11ae6be8aeaf..23c76f41883c 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -20,10 +20,6 @@ #include "53c700.h" -MODULE_AUTHOR("Alan Hourihane / Kars de Jong "); -MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); -MODULE_LICENSE("GPL"); - static struct scsi_host_template a4000t_scsi_driver_template = { .name = "A4000T builtin SCSI", @@ -32,30 +28,35 @@ static struct scsi_host_template a4000t_scsi_driver_template = { .module = THIS_MODULE, }; -static struct platform_device *a4000t_scsi_device; -#define A4000T_SCSI_ADDR 0xdd0040 +#define A4000T_SCSI_OFFSET 0x40 -static int __devinit a4000t_probe(struct platform_device *dev) +static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev) { - struct Scsi_Host *host; + struct resource *res; + phys_addr_t scsi_addr; struct NCR_700_Host_Parameters *hostdata; + struct Scsi_Host *host; - if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI))) - goto out; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; - if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000, + if (!request_mem_region(res->start, resource_size(res), "A4000T builtin SCSI")) - goto out; + return -EBUSY; - hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL); + hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), + GFP_KERNEL); if (!hostdata) { - printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n"); + dev_err(&pdev->dev, "Failed to allocate host data\n"); goto out_release; } + scsi_addr = res->start + A4000T_SCSI_OFFSET; + /* Fill in the required pieces of hostdata */ - hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR); + hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); hostdata->clock = 50; hostdata->chip710 = 1; hostdata->dmode_extra = DMODE_FC2; @@ -63,26 +64,25 @@ static int __devinit a4000t_probe(struct platform_device *dev) /* and register the chip */ host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, - &dev->dev); + &pdev->dev); if (!host) { - printk(KERN_ERR "a4000t-scsi: No host detected; " - "board configuration problem?\n"); + dev_err(&pdev->dev, + "No host detected; board configuration problem?\n"); goto out_free; } host->this_id = 7; - host->base = A4000T_SCSI_ADDR; + host->base = scsi_addr; host->irq = IRQ_AMIGA_PORTS; if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi", host)) { - printk(KERN_ERR "a4000t-scsi: request_irq failed\n"); + dev_err(&pdev->dev, "request_irq failed\n"); goto out_put_host; } - platform_set_drvdata(dev, host); + platform_set_drvdata(pdev, host); scsi_scan_host(host); - return 0; out_put_host: @@ -90,58 +90,49 @@ static int __devinit a4000t_probe(struct platform_device *dev) out_free: kfree(hostdata); out_release: - release_mem_region(A4000T_SCSI_ADDR, 0x1000); - out: + release_mem_region(res->start, resource_size(res)); return -ENODEV; } -static __devexit int a4000t_device_remove(struct platform_device *dev) +static int __exit amiga_a4000t_scsi_remove(struct platform_device *pdev) { - struct 
Scsi_Host *host = platform_get_drvdata(dev); + struct Scsi_Host *host = platform_get_drvdata(pdev); struct NCR_700_Host_Parameters *hostdata = shost_priv(host); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); scsi_remove_host(host); - NCR_700_release(host); kfree(hostdata); free_irq(host->irq, host); - release_mem_region(A4000T_SCSI_ADDR, 0x1000); - + release_mem_region(res->start, resource_size(res)); return 0; } -static struct platform_driver a4000t_scsi_driver = { - .driver = { - .name = "a4000t-scsi", - .owner = THIS_MODULE, +static struct platform_driver amiga_a4000t_scsi_driver = { + .remove = __exit_p(amiga_a4000t_scsi_remove), + .driver = { + .name = "amiga-a4000t-scsi", + .owner = THIS_MODULE, }, - .probe = a4000t_probe, - .remove = __devexit_p(a4000t_device_remove), }; -static int __init a4000t_scsi_init(void) +static int __init amiga_a4000t_scsi_init(void) { - int err; - - err = platform_driver_register(&a4000t_scsi_driver); - if (err) - return err; - - a4000t_scsi_device = platform_device_register_simple("a4000t-scsi", - -1, NULL, 0); - if (IS_ERR(a4000t_scsi_device)) { - platform_driver_unregister(&a4000t_scsi_driver); - return PTR_ERR(a4000t_scsi_device); - } - - return err; + return platform_driver_probe(&amiga_a4000t_scsi_driver, + amiga_a4000t_scsi_probe); } -static void __exit a4000t_scsi_exit(void) +module_init(amiga_a4000t_scsi_init); + +static void __exit amiga_a4000t_scsi_exit(void) { - platform_device_unregister(a4000t_scsi_device); - platform_driver_unregister(&a4000t_scsi_driver); + platform_driver_unregister(&amiga_a4000t_scsi_driver); } -module_init(a4000t_scsi_init); -module_exit(a4000t_scsi_exit); +module_exit(amiga_a4000t_scsi_exit); + +MODULE_AUTHOR("Alan Hourihane / " + "Kars de Jong "); +MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:amiga-a4000t-scsi"); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 9c0c91178538..1a5bf5724750 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -655,9 +655,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { - kfree (usg); - dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", + dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); + kfree(usg); rcode = -ENOMEM; goto cleanup; } diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index ab646e580d64..ce5371b3cdd5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -48,7 +48,7 @@ struct device_attribute; /*The limit of outstanding scsi command that firmware can handle*/ #define ARCMSR_MAX_OUTSTANDING_CMD 256 #define ARCMSR_MAX_FREECCB_NUM 320 -#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" +#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/11/03" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -110,6 +110,8 @@ struct CMD_MESSAGE_FIELD #define FUNCTION_SAY_HELLO 0x0807 #define FUNCTION_SAY_GOODBYE 0x0808 #define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809 +#define FUNCTION_GET_FIRMWARE_STATUS 0x080A +#define FUNCTION_HARDWARE_RESET 0x080B /* ARECA IO CONTROL CODE*/ #define ARCMSR_MESSAGE_READ_RQBUFFER \ ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER @@ -133,6 +135,7 @@ struct CMD_MESSAGE_FIELD #define ARCMSR_MESSAGE_RETURNCODE_OK 0x00000001 #define ARCMSR_MESSAGE_RETURNCODE_ERROR 0x00000006 #define ARCMSR_MESSAGE_RETURNCODE_3F 0x0000003F +#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON 0x00000088 /* ************************************************************* ** structure for holding DMA address data @@ -341,13 +344,13 @@ struct MessageUnit_B uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; uint32_t postq_index; uint32_t doneq_index; - void __iomem *drv2iop_doorbell_reg; - void __iomem *drv2iop_doorbell_mask_reg; - void __iomem *iop2drv_doorbell_reg; - void __iomem *iop2drv_doorbell_mask_reg; - void __iomem *msgcode_rwbuffer_reg; - void __iomem *ioctl_wbuffer_reg; - void __iomem *ioctl_rbuffer_reg; + uint32_t __iomem *drv2iop_doorbell_reg; + uint32_t __iomem *drv2iop_doorbell_mask_reg; + uint32_t __iomem *iop2drv_doorbell_reg; + uint32_t __iomem *iop2drv_doorbell_mask_reg; + uint32_t __iomem *msgcode_rwbuffer_reg; + uint32_t __iomem *ioctl_wbuffer_reg; + uint32_t __iomem *ioctl_rbuffer_reg; }; /* @@ -375,6 +378,7 @@ struct AdapterControlBlock /* message unit ATU inbound base address0 */ uint32_t acb_flags; + uint8_t adapter_index; #define ACB_F_SCSISTOPADAPTER 0x0001 #define ACB_F_MSG_STOP_BGRB 0x0002 /* stop RAID background rebuild */ @@ -390,7 +394,7 @@ struct AdapterControlBlock #define ACB_F_BUS_RESET 0x0080 #define ACB_F_IOP_INITED 0x0100 /* iop init */ - + #define ACB_F_FIRMWARE_TRAP 0x0400 struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM]; /* used for memory free */ struct list_head ccb_free_list; @@ -423,12 +427,19 @@ struct AdapterControlBlock #define ARECA_RAID_GOOD 0xaa uint32_t num_resets; uint32_t num_aborts; + uint32_t signature; uint32_t firm_request_len; uint32_t firm_numbers_queue; uint32_t firm_sdram_size; uint32_t firm_hd_channels; char firm_model[12]; char firm_version[20]; + char device_map[20]; /*21,84-99*/ + struct work_struct arcmsr_do_message_isr_bh; + struct timer_list eternal_timer; + unsigned short fw_state; + atomic_t rq_map_token; + int ante_token_value; };/* HW_DEVICE_EXTENSION */ /* ******************************************************************************* diff --git 
a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index a4e04c50c436..07fdfe57e38e 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c @@ -192,6 +192,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = { .attr = { .name = "mu_read", .mode = S_IRUSR , + .owner = THIS_MODULE, }, .size = 1032, .read = arcmsr_sysfs_iop_message_read, @@ -201,6 +202,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = { .attr = { .name = "mu_write", .mode = S_IWUSR, + .owner = THIS_MODULE, }, .size = 1032, .write = arcmsr_sysfs_iop_message_write, @@ -210,6 +212,7 @@ static struct bin_attribute arcmsr_sysfs_message_clear_attr = { .attr = { .name = "mu_clear", .mode = S_IWUSR, + .owner = THIS_MODULE, }, .size = 1, .write = arcmsr_sysfs_iop_message_clear, diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index ffbe2192da3c..ffa54792bb33 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -72,8 +72,16 @@ #include #include "arcmsr.h" +#ifdef CONFIG_SCSI_ARCMSR_RESET + static int sleeptime = 20; + static int retrycount = 12; + module_param(sleeptime, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset"); + module_param(retrycount, int, S_IRUGO|S_IWUSR); + MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset"); +#endif MODULE_AUTHOR("Erich Chen "); -MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter"); +MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); @@ -96,6 +104,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); +static void arcmsr_request_device_map(unsigned long pacb); +static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); +static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); +static void arcmsr_message_isr_bh_fn(struct work_struct *work); +static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode); +static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); + static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, @@ -112,7 +127,7 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, static struct scsi_host_template arcmsr_scsi_host_template = { .module = THIS_MODULE, - .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter" + .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter" ARCMSR_DRIVER_VERSION, .info = arcmsr_info, .queuecommand = arcmsr_queue_command, @@ -128,16 +143,6 @@ static struct scsi_host_template arcmsr_scsi_host_template = { .use_clustering = ENABLE_CLUSTERING, .shost_attrs = arcmsr_host_attrs, }; -#ifdef CONFIG_SCSI_ARCMSR_AER -static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev); -static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state); - -static struct pci_error_handlers arcmsr_pci_error_handlers = { - .error_detected = arcmsr_pci_error_detected, - .slot_reset = arcmsr_pci_slot_reset, -}; -#endif static struct pci_device_id 
arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, @@ -166,9 +171,6 @@ static struct pci_driver arcmsr_pci_driver = { .probe = arcmsr_probe, .remove = arcmsr_remove, .shutdown = arcmsr_shutdown, - #ifdef CONFIG_SCSI_ARCMSR_AER - .err_handler = &arcmsr_pci_error_handlers, - #endif }; static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) @@ -236,10 +238,9 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) void *dma_coherent; dma_addr_t dma_coherent_handle, dma_addr; struct CommandControlBlock *ccb_tmp; - uint32_t intmask_org; int i, j; - acb->pmuA = pci_ioremap_bar(pdev, 0); + acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!acb->pmuA) { printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no); @@ -281,12 +282,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GONE; - - /* - ** here we need to tell iop 331 our ccb_tmp.HighPart - ** if ccb_tmp.HighPart is not zero - */ - intmask_org = arcmsr_disable_outbound_ints(acb); } break; @@ -297,7 +292,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) void __iomem *mem_base0, *mem_base1; void *dma_coherent; dma_addr_t dma_coherent_handle, dma_addr; - uint32_t intmask_org; struct CommandControlBlock *ccb_tmp; int i, j; @@ -333,11 +327,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) reg = (struct MessageUnit_B *)(dma_coherent + ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); acb->pmuB = reg; - mem_base0 = pci_ioremap_bar(pdev, 0); + mem_base0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); if (!mem_base0) goto out; - mem_base1 = pci_ioremap_bar(pdev, 2); + mem_base1 = ioremap(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); if (!mem_base1) { iounmap(mem_base0); goto out; @@ -357,12 +353,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GOOD; - - /* - ** here we need to tell iop 331 our ccb_tmp.HighPart - ** if ccb_tmp.HighPart is not zero - */ - intmask_org = arcmsr_disable_outbound_ints(acb); } break; } @@ -374,6 +364,88 @@ out: sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); return -ENOMEM; } +static void arcmsr_message_isr_bh_fn(struct work_struct *work) +{ + struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh); + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + + struct MessageUnit_A __iomem *reg = acb->pmuA; + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = (uint32_t __iomem *) (®->message_rwbuffer[0]); + char __iomem *devicemap = (char __iomem *) (®->message_rwbuffer[21]); + int target, lun; + struct scsi_device *psdev; + char diff; + + atomic_inc(&acb->rq_map_token); + if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { + diff = (*acb_dev_map)^readb(devicemap); + if (diff != 0) { + char temp; + *acb_dev_map = readb(devicemap); + temp = *acb_dev_map; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { + if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { + scsi_add_device(acb->host, 0, target, lun); + } else if ((temp & 
0x01) == 0 && (diff & 0x01) == 1) { + psdev = scsi_device_lookup(acb->host, 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + diff >>= 1; + } + } + devicemap++; + acb_dev_map++; + } + } + break; + } + + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + char *acb_dev_map = (char *)acb->device_map; + uint32_t __iomem *signature = (uint32_t __iomem *)(®->msgcode_rwbuffer_reg[0]); + char __iomem *devicemap = (char __iomem *)(®->msgcode_rwbuffer_reg[21]); + int target, lun; + struct scsi_device *psdev; + char diff; + + atomic_inc(&acb->rq_map_token); + if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { + for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { + diff = (*acb_dev_map)^readb(devicemap); + if (diff != 0) { + char temp; + *acb_dev_map = readb(devicemap); + temp = *acb_dev_map; + for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { + if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { + scsi_add_device(acb->host, 0, target, lun); + } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { + psdev = scsi_device_lookup(acb->host, 0, target, lun); + if (psdev != NULL) { + scsi_remove_device(psdev); + scsi_device_put(psdev); + } + } + temp >>= 1; + diff >>= 1; + } + } + devicemap++; + acb_dev_map++; + } + } + } + } +} static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -432,17 +504,17 @@ static int arcmsr_probe(struct pci_dev *pdev, ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); - + INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); error = arcmsr_alloc_ccb_pool(acb); if (error) goto out_release_regions; + arcmsr_iop_init(acb); error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb); if (error) goto out_free_ccb_pool; - arcmsr_iop_init(acb); pci_set_drvdata(pdev, host); if (strncmp(acb->firm_version, "V1.42", 5) >= 0) host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B; @@ -459,6 +531,14 @@ static int arcmsr_probe(struct pci_dev *pdev, #ifdef CONFIG_SCSI_ARCMSR_AER pci_enable_pcie_error_reporting(pdev); #endif + atomic_set(&acb->rq_map_token, 16); + acb->fw_state = true; + init_timer(&acb->eternal_timer); + acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ); + acb->eternal_timer.data = (unsigned long) acb; + acb->eternal_timer.function = &arcmsr_request_device_map; + add_timer(&acb->eternal_timer); + return 0; out_free_sysfs: out_free_irq: @@ -518,40 +598,48 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) return 0xff; } -static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, ®->inbound_msgaddr0); - if (arcmsr_hba_wait_msgint_ready(acb)) + if (arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); + return 0xff; + } + return 0x00; } -static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); - if (arcmsr_hbb_wait_msgint_ready(acb)) + if (arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); + return 0xff; + } + 
return 0x00; } -static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb) +static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { + uint8_t rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_abort_hba_allcmd(acb); + rtnval = arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_abort_hbb_allcmd(acb); + rtnval = arcmsr_abort_hbb_allcmd(acb); } } + return rtnval; } static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) @@ -649,8 +737,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; - orig_mask = readl(®->outbound_intmask)|\ - ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; + orig_mask = readl(®->outbound_intmask); writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ ®->outbound_intmask); } @@ -658,8 +745,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; - orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ - (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); + orig_mask = readl(reg->iop2drv_doorbell_mask_reg); writel(0, reg->iop2drv_doorbell_mask_reg); } break; @@ -795,12 +881,13 @@ static void arcmsr_remove(struct pci_dev *pdev) struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int poll_count = 0; - arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); + flush_scheduled_work(); + del_timer_sync(&acb->eternal_timer); + arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); - arcmsr_disable_outbound_ints(acb); acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; @@ -841,7 +928,9 @@ static void arcmsr_shutdown(struct pci_dev *pdev) struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; - + del_timer_sync(&acb->eternal_timer); + arcmsr_disable_outbound_ints(acb); + flush_scheduled_work(); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } @@ -861,7 +950,7 @@ static void arcmsr_module_exit(void) module_init(arcmsr_module_init); module_exit(arcmsr_module_exit); -static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ +static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org) { u32 mask; @@ -871,7 +960,8 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | - ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); + ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE| + ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE); writel(mask, ®->outbound_intmask); acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff; } @@ -879,8 +969,10 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; - mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ - ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); + mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | + ARCMSR_IOP2DRV_DATA_READ_OK | + ARCMSR_IOP2DRV_CDB_DONE | + ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); writel(mask, reg->iop2drv_doorbell_mask_reg); acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f; } @@ -1048,8 +1140,8 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) } case ACB_ADAPTER_TYPE_B: { struct 
MessageUnit_B *reg = acb->pmuB; - iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); - iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); + iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); + iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); dma_free_coherent(&acb->pdev->dev, (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 + sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle); @@ -1249,13 +1341,36 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) reg->doneq_index = index; } } +/* +********************************************************************************** +** Handle a message interrupt +** +** The only message interrupt we expect is in response to a query for the current adapter config. +** We want this in order to compare the drivemap so that we can detect newly-attached drives. +********************************************************************************** +*/ +static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A *reg = acb->pmuA; + + /*clear interrupt and message state*/ + writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, ®->outbound_intstatus); + schedule_work(&acb->arcmsr_do_message_isr_bh); +} +static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + /*clear interrupt and message state*/ + writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); + schedule_work(&acb->arcmsr_do_message_isr_bh); +} static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) { uint32_t outbound_intstatus; struct MessageUnit_A __iomem *reg = acb->pmuA; - outbound_intstatus = readl(®->outbound_intstatus) & \ + outbound_intstatus = readl(®->outbound_intstatus) & acb->outbound_int_enable; if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) { return 1; @@ -1267,6 +1382,10 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { arcmsr_hba_postqueue_isr(acb); } + if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { + /* messenger of "driver to iop commands" */ + arcmsr_hba_message_isr(acb); + } return 0; } @@ -1275,13 +1394,14 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) uint32_t outbound_doorbell; struct MessageUnit_B *reg = acb->pmuB; - outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ + outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & acb->outbound_int_enable; if (!outbound_doorbell) return 1; writel(~outbound_doorbell, reg->iop2drv_doorbell_reg); - /*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/ + /*in case the last action of doorbell interrupt clearance is cached, + this action can push HW to write down the clear bit*/ readl(reg->iop2drv_doorbell_reg); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg); if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { @@ -1293,6 +1413,10 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { arcmsr_hbb_postqueue_isr(acb); } + if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { + /* messenger of "driver to iop commands" */ + arcmsr_hbb_message_isr(acb); + } return 0; } @@ -1360,7 +1484,7 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) } } -static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ +static int 
arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd) { struct CMD_MESSAGE_FIELD *pcmdmessagefld; @@ -1398,6 +1522,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } + + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } + ptmpQbuffer = ver_addr; while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) && (allxfer_len < 1031)) { @@ -1444,6 +1575,12 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ retvalue = ARCMSR_MESSAGE_FAIL; goto message_out; } + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } + ptmpuserbuffer = ver_addr; user_len = pcmdmessagefld->cmdmessage.Length; memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); @@ -1496,6 +1633,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { uint8_t *pQbuffer = acb->rqbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1511,6 +1653,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { uint8_t *pQbuffer = acb->wqbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1529,6 +1676,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { uint8_t *pQbuffer; + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; @@ -1551,13 +1703,22 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ break; case ARCMSR_MESSAGE_RETURN_CODE_3F: { + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; } break; case ARCMSR_MESSAGE_SAY_HELLO: { int8_t *hello_string = "Hello! 
I am ARCMSR"; - + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } memcpy(pcmdmessagefld->messagedatabuffer, hello_string , (int16_t)strlen(hello_string)); pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; @@ -1565,10 +1726,20 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ break; case ARCMSR_MESSAGE_SAY_GOODBYE: + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } arcmsr_iop_parking(acb); break; case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: + if (!acb->fw_state) { + pcmdmessagefld->cmdmessage.ReturnCode = + ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON; + goto message_out; + } arcmsr_flush_adapter_cache(acb); break; @@ -1651,16 +1822,57 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, struct CommandControlBlock *ccb; int target = cmd->device->id; int lun = cmd->device->lun; - + uint8_t scsicmd = cmd->cmnd[0]; cmd->scsi_done = done; cmd->host_scribble = NULL; cmd->result = 0; + + if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) { + if (acb->devstate[target][lun] == ARECA_RAID_GONE) { + cmd->result = (DID_NO_CONNECT << 16); + } + cmd->scsi_done(cmd); + return 0; + } + if (acb->acb_flags & ACB_F_BUS_RESET) { - printk(KERN_NOTICE "arcmsr%d: bus reset" - " and return busy \n" - , acb->host->host_no); + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t intmask_org, outbound_doorbell; + + if ((readl(®->outbound_msgaddr1) & + ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { + printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n", + acb->host->host_no); return SCSI_MLQUEUE_HOST_BUSY; } + + acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n", + acb->host->host_no); + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_get_firmware_spec(acb, 1); + /*start background rebuild*/ + arcmsr_start_adapter_bgrb(acb); + /* clear Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + /*clear interrupt */ + writel(outbound_doorbell, ®->outbound_doorbell); + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, + ®->inbound_doorbell); + /* enable outbound Post Queue,outbound doorbell Interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + acb->acb_flags |= ACB_F_IOP_INITED; + acb->acb_flags &= ~ACB_F_BUS_RESET; + } + break; + case ACB_ADAPTER_TYPE_B: { + } + } + } + if (target == 16) { /* virtual device for iop message transfer */ arcmsr_handle_virtual_command(acb, cmd); @@ -1699,21 +1911,25 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, return 0; } -static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) +static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode) { struct MessageUnit_A __iomem *reg = acb->pmuA; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; + char *acb_device_map = acb->device_map; char __iomem *iop_firm_model = (char __iomem *)(®->message_rwbuffer[15]); char __iomem *iop_firm_version = (char __iomem *)(®->message_rwbuffer[17]); + char __iomem *iop_device_map = (char __iomem *) (®->message_rwbuffer[21]); int count; writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); if (arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", 
acb->host->host_no); + return NULL; } + if (mode == 1) { count = 8; while (count) { *acb_firm_model = readb(iop_firm_model); @@ -1730,34 +1946,48 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) count--; } + count = 16; + while (count) { + *acb_device_map = readb(iop_device_map); + acb_device_map++; + iop_device_map++; + count--; + } + printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n" , acb->host->host_no , acb->firm_version); - + acb->signature = readl(®->message_rwbuffer[0]); acb->firm_request_len = readl(®->message_rwbuffer[1]); acb->firm_numbers_queue = readl(®->message_rwbuffer[2]); acb->firm_sdram_size = readl(®->message_rwbuffer[3]); acb->firm_hd_channels = readl(®->message_rwbuffer[4]); } - -static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) + return reg->message_rwbuffer; +} +static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode) { struct MessageUnit_B *reg = acb->pmuB; uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; char *acb_firm_model = acb->firm_model; char *acb_firm_version = acb->firm_version; + char *acb_device_map = acb->device_map; char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); /*firm_model,15,60-67*/ char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); /*firm_version,17,68-83*/ + char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]); + /*firm_version,21,84-99*/ int count; writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); if (arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \ miscellaneous data' timeout \n", acb->host->host_no); + return NULL; } + if (mode == 1) { count = 8; while (count) { @@ -1776,11 +2006,20 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) count--; } + count = 16; + while (count) { + *acb_device_map = readb(iop_device_map); + acb_device_map++; + iop_device_map++; + count--; + } + printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->host->host_no, acb->firm_version); - lrwbuffer++; + acb->signature = readl(lrwbuffer++); + /*firm_signature,1,00-03*/ acb->firm_request_len = readl(lrwbuffer++); /*firm_request_len,1,04-07*/ acb->firm_numbers_queue = readl(lrwbuffer++); @@ -1790,20 +2029,23 @@ static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) acb->firm_hd_channels = readl(lrwbuffer); /*firm_ide_channels,4,16-19*/ } - -static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) + return reg->msgcode_rwbuffer_reg; +} +static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode) { + void *rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { - arcmsr_get_hba_config(acb); + rtnval = arcmsr_get_hba_config(acb, mode); } break; case ACB_ADAPTER_TYPE_B: { - arcmsr_get_hbb_config(acb); + rtnval = arcmsr_get_hbb_config(acb, mode); } break; } + return rtnval; } static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, @@ -2043,6 +2285,66 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) } } +static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) +{ + struct MessageUnit_A __iomem *reg = acb->pmuA; + + if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { + acb->fw_state = false; + } else { + /*to prevent rq_map_token from changing by other interrupt, then + avoid the dead-lock*/ + acb->fw_state = true; + atomic_dec(&acb->rq_map_token); + if (!(acb->fw_state) || + (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { + 
atomic_set(&acb->rq_map_token, 16); + } + acb->ante_token_value = atomic_read(&acb->rq_map_token); + writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, ®->inbound_msgaddr0); + } + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); + return; +} + +static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B __iomem *reg = acb->pmuB; + + if (unlikely(atomic_read(&acb->rq_map_token) == 0)) { + acb->fw_state = false; + } else { + /*to prevent rq_map_token from changing by other interrupt, then + avoid the dead-lock*/ + acb->fw_state = true; + atomic_dec(&acb->rq_map_token); + if (!(acb->fw_state) || + (acb->ante_token_value == atomic_read(&acb->rq_map_token))) { + atomic_set(&acb->rq_map_token, 16); + } + acb->ante_token_value = atomic_read(&acb->rq_map_token); + writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg); + } + mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000)); + return; +} + +static void arcmsr_request_device_map(unsigned long pacb) +{ + struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; + + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + arcmsr_request_hba_device_map(acb); + } + break; + case ACB_ADAPTER_TYPE_B: { + arcmsr_request_hbb_device_map(acb); + } + break; + } +} + static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; @@ -2121,6 +2423,60 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) return; } +static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) +{ + uint8_t value[64]; + int i; + + /* backup pci config data */ + for (i = 0; i < 64; i++) { + pci_read_config_byte(acb->pdev, i, &value[i]); + } + /* hardware reset signal */ + pci_write_config_byte(acb->pdev, 0x84, 0x20); + msleep(1000); + /* write back pci config data */ + for (i = 0; i < 64; i++) { + pci_write_config_byte(acb->pdev, i, value[i]); + } + msleep(1000); + return; +} +/* +**************************************************************************** +**************************************************************************** +*/ +#ifdef CONFIG_SCSI_ARCMSR_RESET + int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd) + { + struct Scsi_Host *shost = NULL; + spinlock_t *host_lock = NULL; + int i, isleep; + + shost = cmd->device->host; + host_lock = shost->host_lock; + + printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n", + shost->host_no, sleeptime, shost->host_busy, shost->can_queue); + isleep = sleeptime / 10; + spin_unlock_irq(host_lock); + if (isleep > 0) { + for (i = 0; i < isleep; i++) { + msleep(10000); + printk(KERN_NOTICE "^%d^\n", i); + } + } + + isleep = sleeptime % 10; + if (isleep > 0) { + msleep(isleep * 1000); + printk(KERN_NOTICE "^v^\n"); + } + spin_lock_irq(host_lock); + printk(KERN_NOTICE "***** wake up *****\n"); + return 0; + } +#endif static void arcmsr_iop_init(struct AdapterControlBlock *acb) { uint32_t intmask_org; @@ -2129,7 +2485,7 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); - arcmsr_get_firmware_spec(acb); + arcmsr_get_firmware_spec(acb, 1); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ @@ -2140,51 +2496,110 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb) acb->acb_flags |= ACB_F_IOP_INITED; } -static void arcmsr_iop_reset(struct 
AdapterControlBlock *acb) +static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *ccb; uint32_t intmask_org; + uint8_t rtnval = 0x00; int i = 0; if (atomic_read(&acb->ccboutstandingcount) != 0) { + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); /* talk to iop 331 outstanding command aborted */ - arcmsr_abort_allcmd(acb); - + rtnval = arcmsr_abort_allcmd(acb); /* wait for 3 sec for all command aborted*/ ssleep(3); - - /* disable all outbound interrupt */ - intmask_org = arcmsr_disable_outbound_ints(acb); /* clear all outbound posted Q */ arcmsr_done4abort_postqueue(acb); for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { - ccb->startdone = ARCMSR_CCB_ABORTED; arcmsr_ccb_complete(ccb, 1); } } + atomic_set(&acb->ccboutstandingcount, 0); /* enable all outbound interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); + return rtnval; } + return rtnval; } static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)cmd->device->host->hostdata; - int i; + int retry = 0; - acb->num_resets++; + if (acb->acb_flags & ACB_F_BUS_RESET) + return SUCCESS; + + printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index); acb->acb_flags |= ACB_F_BUS_RESET; - for (i = 0; i < 400; i++) { - if (!atomic_read(&acb->ccboutstandingcount)) + acb->num_resets++; + while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) { + arcmsr_interrupt(acb); + retry++; + } + + if (arcmsr_iop_reset(acb)) { + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_A: { + printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n", + acb->adapter_index, acb->num_resets, acb->num_aborts); + arcmsr_hardware_reset(acb); + acb->acb_flags |= ACB_F_FIRMWARE_TRAP; + acb->acb_flags &= ~ACB_F_IOP_INITED; + #ifdef CONFIG_SCSI_ARCMSR_RESET + struct MessageUnit_A __iomem *reg = acb->pmuA; + uint32_t intmask_org, outbound_doorbell; + int retry_count = 0; +sleep_again: + arcmsr_sleep_for_bus_reset(cmd); + if ((readl(®->outbound_msgaddr1) & + ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n", + acb->host->host_no, retry_count); + if (retry_count > retrycount) { + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n", + acb->host->host_no); + return SUCCESS; + } + retry_count++; + goto sleep_again; + } + acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP; + acb->acb_flags |= ACB_F_IOP_INITED; + acb->acb_flags &= ~ACB_F_BUS_RESET; + printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n", + acb->host->host_no); + /* disable all outbound interrupt */ + intmask_org = arcmsr_disable_outbound_ints(acb); + arcmsr_get_firmware_spec(acb, 1); + /*start background rebuild*/ + arcmsr_start_adapter_bgrb(acb); + /* clear Qbuffer if door bell ringed */ + outbound_doorbell = readl(®->outbound_doorbell); + writel(outbound_doorbell, ®->outbound_doorbell); /*clear interrupt */ + writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell); + /* enable outbound Post Queue,outbound doorbell Interrupt */ + arcmsr_enable_outbound_ints(acb, intmask_org); + atomic_set(&acb->rq_map_token, 16); + init_timer(&acb->eternal_timer); + acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ); + acb->eternal_timer.data = (unsigned long) acb; + acb->eternal_timer.function = &arcmsr_request_device_map; + 
add_timer(&acb->eternal_timer); + #endif + } break; - arcmsr_interrupt(acb);/* FIXME: need spinlock */ - msleep(25); + case ACB_ADAPTER_TYPE_B: { } - arcmsr_iop_reset(acb); + } + } else { acb->acb_flags &= ~ACB_F_BUS_RESET; + } return SUCCESS; } @@ -2277,98 +2692,3 @@ static const char *arcmsr_info(struct Scsi_Host *host) ARCMSR_DRIVER_VERSION); return buf; } -#ifdef CONFIG_SCSI_ARCMSR_AER -static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = - (struct AdapterControlBlock *) host->hostdata; - uint32_t intmask_org; - int i, j; - - if (pci_enable_device(pdev)) { - return PCI_ERS_RESULT_DISCONNECT; - } - pci_set_master(pdev); - intmask_org = arcmsr_disable_outbound_ints(acb); - acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | - ACB_F_MESSAGE_RQBUFFER_CLEARED | - ACB_F_MESSAGE_WQBUFFER_READED); - acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; - for (i = 0; i < ARCMSR_MAX_TARGETID; i++) - for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) - acb->devstate[i][j] = ARECA_RAID_GONE; - - arcmsr_wait_firmware_ready(acb); - arcmsr_iop_confirm(acb); - /* disable all outbound interrupt */ - arcmsr_get_firmware_spec(acb); - /*start background rebuild*/ - arcmsr_start_adapter_bgrb(acb); - /* empty doorbell Qbuffer if door bell ringed */ - arcmsr_clear_doorbell_queue_buffer(acb); - arcmsr_enable_eoi_mode(acb); - /* enable outbound Post Queue,outbound doorbell Interrupt */ - arcmsr_enable_outbound_ints(acb, intmask_org); - acb->acb_flags |= ACB_F_IOP_INITED; - - pci_enable_pcie_error_reporting(pdev); - return PCI_ERS_RESULT_RECOVERED; -} - -static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; - struct CommandControlBlock *ccb; - uint32_t intmask_org; - int i = 0; - - if (atomic_read(&acb->ccboutstandingcount) != 0) { - /* talk to iop 331 outstanding command aborted */ - arcmsr_abort_allcmd(acb); - /* wait for 3 sec for all command aborted*/ - ssleep(3); - /* disable all outbound interrupt */ - intmask_org = arcmsr_disable_outbound_ints(acb); - /* clear all outbound posted Q */ - arcmsr_done4abort_postqueue(acb); - for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { - ccb = acb->pccb_pool[i]; - if (ccb->startdone == ARCMSR_CCB_START) { - ccb->startdone = ARCMSR_CCB_ABORTED; - arcmsr_ccb_complete(ccb, 1); - } - } - /* enable all outbound interrupt */ - arcmsr_enable_outbound_ints(acb, intmask_org); - } - pci_disable_device(pdev); -} - -static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev) -{ - struct Scsi_Host *host = pci_get_drvdata(pdev); - struct AdapterControlBlock *acb = \ - (struct AdapterControlBlock *)host->hostdata; - - arcmsr_stop_adapter_bgrb(acb); - arcmsr_flush_adapter_cache(acb); -} - -static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - switch (state) { - case pci_channel_io_frozen: - arcmsr_pci_ers_need_reset_forepart(pdev); - return PCI_ERS_RESULT_NEED_RESET; - case pci_channel_io_perm_failure: - arcmsr_pci_ers_disconnect_forepart(pdev); - return PCI_ERS_RESULT_DISCONNECT; - break; - default: - return PCI_ERS_RESULT_NEED_RESET; - } -} -#endif diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index e641922f20bc..350cbeaae160 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -167,10 +167,9 @@ unsigned char 
mgmt_invalidate_icds(struct beiscsi_hba *phba, &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { SE_DEBUG(DBG_LVL_1, - "Failed to allocate memory for" - "mgmt_invalidate_icds \n"); + "Failed to allocate memory for mgmt_invalidate_icds\n"); spin_unlock(&ctrl->mbox_lock); - return -1; + return 0; } nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); req = nonemb_cmd.va; diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 0c08e185a766..3a7b3f88932f 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -84,11 +84,32 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) for (i = 0; hal_mods[i]; i++) hal_mods[i]->meminfo(cfg, &km_len, &dm_len); + dm_len += bfa_port_meminfo(); meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len; meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; } +static void +bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) +{ + struct bfa_port_s *port = &bfa->modules.port; + uint32_t dm_len; + uint8_t *dm_kva; + uint64_t dm_pa; + + dm_len = bfa_port_meminfo(); + dm_kva = bfa_meminfo_dma_virt(mi); + dm_pa = bfa_meminfo_dma_phys(mi); + + memset(port, 0, sizeof(struct bfa_port_s)); + bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm); + bfa_port_mem_claim(port, dm_kva, dm_pa); + + bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; + bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; +} + /** * Use this function to do attach the driver instance with the BFA * library. This function will not trigger any HW initialization @@ -140,6 +161,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, for (i = 0; hal_mods[i]; i++) hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev); + bfa_com_port_attach(bfa, meminfo); } /** diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 18b7102bb80e..2ce26eb7a1ec 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -1,36 +1,35 @@ #include -#include -#include -#include #include #include +#include +#include +#include +#include -#include #include #include #include #include -#include -#include -#include #include "scsi.h" -#include #include "wd33c93.h" #include "gvp11.h" -#include +#define CHECK_WD33C93 -#define DMA(ptr) ((gvp11_scsiregs *)((ptr)->base)) +struct gvp11_hostdata { + struct WD33C93_hostdata wh; + struct gvp11_scsiregs *regs; +}; -static irqreturn_t gvp11_intr(int irq, void *_instance) +static irqreturn_t gvp11_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + struct gvp11_hostdata *hdata = shost_priv(instance); + unsigned int status = hdata->regs->CNTR; unsigned long flags; - unsigned int status; - struct Scsi_Host *instance = (struct Scsi_Host *)_instance; - status = DMA(instance)->CNTR; if (!(status & GVP11_DMAC_INT_PENDING)) return IRQ_NONE; @@ -50,64 +49,66 @@ void gvp11_setup(char *str, int *ints) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { struct Scsi_Host *instance = cmd->device->host; - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; unsigned short cntr = GVP11_DMAC_INT_ENABLE; unsigned long addr = virt_to_bus(cmd->SCp.ptr); int bank_mask; static int scsi_alloc_out_of_range = 0; /* use bounce buffer if the physical address is bad */ - if (addr & hdata->dma_xfer_mask) { - hdata->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff; + if (addr & wh->dma_xfer_mask) { + wh->dma_bounce_len = 
(cmd->SCp.this_residual + 511) & ~0x1ff; if (!scsi_alloc_out_of_range) { - hdata->dma_bounce_buffer = - kmalloc(hdata->dma_bounce_len, GFP_KERNEL); - hdata->dma_buffer_pool = BUF_SCSI_ALLOCED; + wh->dma_bounce_buffer = + kmalloc(wh->dma_bounce_len, GFP_KERNEL); + wh->dma_buffer_pool = BUF_SCSI_ALLOCED; } if (scsi_alloc_out_of_range || - !hdata->dma_bounce_buffer) { - hdata->dma_bounce_buffer = - amiga_chip_alloc(hdata->dma_bounce_len, + !wh->dma_bounce_buffer) { + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } - hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } /* check if the address of the bounce buffer is OK */ - addr = virt_to_bus(hdata->dma_bounce_buffer); + addr = virt_to_bus(wh->dma_bounce_buffer); - if (addr & hdata->dma_xfer_mask) { + if (addr & wh->dma_xfer_mask) { /* fall back to Chip RAM if address out of range */ - if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) { - kfree(hdata->dma_bounce_buffer); + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) { + kfree(wh->dma_bounce_buffer); scsi_alloc_out_of_range = 1; } else { - amiga_chip_free(hdata->dma_bounce_buffer); + amiga_chip_free(wh->dma_bounce_buffer); } - hdata->dma_bounce_buffer = - amiga_chip_alloc(hdata->dma_bounce_len, + wh->dma_bounce_buffer = + amiga_chip_alloc(wh->dma_bounce_len, "GVP II SCSI Bounce Buffer"); - if (!hdata->dma_bounce_buffer) { - hdata->dma_bounce_len = 0; + if (!wh->dma_bounce_buffer) { + wh->dma_bounce_len = 0; return 1; } - addr = virt_to_bus(hdata->dma_bounce_buffer); - hdata->dma_buffer_pool = BUF_CHIP_ALLOCED; + addr = virt_to_bus(wh->dma_bounce_buffer); + wh->dma_buffer_pool = BUF_CHIP_ALLOCED; } if (!dir_in) { /* copy to bounce buffer for a write */ - memcpy(hdata->dma_bounce_buffer, cmd->SCp.ptr, + memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr, cmd->SCp.this_residual); } } @@ -116,11 +117,11 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) if (!dir_in) cntr |= GVP11_DMAC_DIR_WRITE; - hdata->dma_dir = dir_in; - DMA(cmd->device->host)->CNTR = cntr; + wh->dma_dir = dir_in; + regs->CNTR = cntr; /* setup DMA *physical* address */ - DMA(cmd->device->host)->ACR = addr; + regs->ACR = addr; if (dir_in) { /* invalidate any cache */ @@ -130,12 +131,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) cache_push(addr, cmd->SCp.this_residual); } - bank_mask = (~hdata->dma_xfer_mask >> 18) & 0x01c0; + bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0; if (bank_mask) - DMA(cmd->device->host)->BANK = bank_mask & (addr >> 18); + regs->BANK = bank_mask & (addr >> 18); /* start DMA */ - DMA(cmd->device->host)->ST_DMA = 1; + regs->ST_DMA = 1; /* return success */ return 0; @@ -144,236 +145,53 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in) static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int status) { - struct WD33C93_hostdata *hdata = shost_priv(instance); + struct gvp11_hostdata *hdata = shost_priv(instance); + struct WD33C93_hostdata *wh = &hdata->wh; + struct gvp11_scsiregs *regs = hdata->regs; /* stop DMA */ - DMA(instance)->SP_DMA = 1; + regs->SP_DMA = 1; /* remove write bit from CONTROL bits */ - DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; + regs->CNTR = GVP11_DMAC_INT_ENABLE; /* copy from a bounce buffer, if necessary */ - if (status && hdata->dma_bounce_buffer) { - if (hdata->dma_dir && SCpnt) - memcpy(SCpnt->SCp.ptr, hdata->dma_bounce_buffer, 
+ if (status && wh->dma_bounce_buffer) { + if (wh->dma_dir && SCpnt) + memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer, SCpnt->SCp.this_residual); - if (hdata->dma_buffer_pool == BUF_SCSI_ALLOCED) - kfree(hdata->dma_bounce_buffer); - else - amiga_chip_free(hdata->dma_bounce_buffer); - - hdata->dma_bounce_buffer = NULL; - hdata->dma_bounce_len = 0; - } -} - -#define CHECK_WD33C93 - -int __init gvp11_detect(struct scsi_host_template *tpnt) -{ - static unsigned char called = 0; - struct Scsi_Host *instance; - unsigned long address; - unsigned int epc; - struct zorro_dev *z = NULL; - unsigned int default_dma_xfer_mask; - struct WD33C93_hostdata *hdata; - wd33c93_regs regs; - int num_gvp11 = 0; -#ifdef CHECK_WD33C93 - volatile unsigned char *sasr_3393, *scmd_3393; - unsigned char save_sasr; - unsigned char q, qq; -#endif - - if (!MACH_IS_AMIGA || called) - return 0; - called = 1; - - tpnt->proc_name = "GVP11"; - tpnt->proc_info = &wd33c93_proc_info; - - while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { - /* - * This should (hopefully) be the correct way to identify - * all the different GVP SCSI controllers (except for the - * SERIES I though). - */ - - if (z->id == ZORRO_PROD_GVP_COMBO_030_R3_SCSI || - z->id == ZORRO_PROD_GVP_SERIES_II) - default_dma_xfer_mask = ~0x00ffffff; - else if (z->id == ZORRO_PROD_GVP_GFORCE_030_SCSI || - z->id == ZORRO_PROD_GVP_A530_SCSI || - z->id == ZORRO_PROD_GVP_COMBO_030_R4_SCSI) - default_dma_xfer_mask = ~0x01ffffff; - else if (z->id == ZORRO_PROD_GVP_A1291 || - z->id == ZORRO_PROD_GVP_GFORCE_040_SCSI_1) - default_dma_xfer_mask = ~0x07ffffff; + if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) + kfree(wh->dma_bounce_buffer); else - continue; - - /* - * Rumors state that some GVP ram boards use the same product - * code as the SCSI controllers. Therefore if the board-size - * is not 64KB we asume it is a ram board and bail out. - */ - if (z->resource.end - z->resource.start != 0xffff) - continue; + amiga_chip_free(wh->dma_bounce_buffer); - address = z->resource.start; - if (!request_mem_region(address, 256, "wd33c93")) - continue; - -#ifdef CHECK_WD33C93 - - /* - * These darn GVP boards are a problem - it can be tough to tell - * whether or not they include a SCSI controller. This is the - * ultimate Yet-Another-GVP-Detection-Hack in that it actually - * probes for a WD33c93 chip: If we find one, it's extremely - * likely that this card supports SCSI, regardless of Product_ - * Code, Board_Size, etc. - */ - - /* Get pointers to the presumed register locations and save contents */ - - sasr_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SASR); - scmd_3393 = &(((gvp11_scsiregs *)(ZTWO_VADDR(address)))->SCMD); - save_sasr = *sasr_3393; - - /* First test the AuxStatus Reg */ - - q = *sasr_3393; /* read it */ - if (q & 0x08) /* bit 3 should always be clear */ - goto release; - *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ - if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ - *sasr_3393 = save_sasr; /* Oops - restore this byte */ - goto release; - } - if (*sasr_3393 != q) { /* should still read the same */ - *sasr_3393 = save_sasr; /* Oops - restore this byte */ - goto release; - } - if (*scmd_3393 != q) /* and so should the image at 0x1f */ - goto release; - - /* - * Ok, we probably have a wd33c93, but let's check a few other places - * for good measure. Make sure that this works for both 'A and 'B - * chip versions. 
- */ - - *sasr_3393 = WD_SCSI_STATUS; - q = *scmd_3393; - *sasr_3393 = WD_SCSI_STATUS; - *scmd_3393 = ~q; - *sasr_3393 = WD_SCSI_STATUS; - qq = *scmd_3393; - *sasr_3393 = WD_SCSI_STATUS; - *scmd_3393 = q; - if (qq != q) /* should be read only */ - goto release; - *sasr_3393 = 0x1e; /* this register is unimplemented */ - q = *scmd_3393; - *sasr_3393 = 0x1e; - *scmd_3393 = ~q; - *sasr_3393 = 0x1e; - qq = *scmd_3393; - *sasr_3393 = 0x1e; - *scmd_3393 = q; - if (qq != q || qq != 0xff) /* should be read only, all 1's */ - goto release; - *sasr_3393 = WD_TIMEOUT_PERIOD; - q = *scmd_3393; - *sasr_3393 = WD_TIMEOUT_PERIOD; - *scmd_3393 = ~q; - *sasr_3393 = WD_TIMEOUT_PERIOD; - qq = *scmd_3393; - *sasr_3393 = WD_TIMEOUT_PERIOD; - *scmd_3393 = q; - if (qq != (~q & 0xff)) /* should be read/write */ - goto release; -#endif - - instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (instance == NULL) - goto release; - instance->base = ZTWO_VADDR(address); - instance->irq = IRQ_AMIGA_PORTS; - instance->unique_id = z->slotaddr; - - hdata = shost_priv(instance); - if (gvp11_xfer_mask) - hdata->dma_xfer_mask = gvp11_xfer_mask; - else - hdata->dma_xfer_mask = default_dma_xfer_mask; - - DMA(instance)->secret2 = 1; - DMA(instance)->secret1 = 0; - DMA(instance)->secret3 = 15; - while (DMA(instance)->CNTR & GVP11_DMAC_BUSY) - ; - DMA(instance)->CNTR = 0; - - DMA(instance)->BANK = 0; - - epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); - - /* - * Check for 14MHz SCSI clock - */ - regs.SASR = &(DMA(instance)->SASR); - regs.SCMD = &(DMA(instance)->SCMD); - hdata->no_sync = 0xff; - hdata->fast = 0; - hdata->dma_mode = CTRL_DMA; - wd33c93_init(instance, regs, dma_setup, dma_stop, - (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 - : WD33C93_FS_12_15); - - if (request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, - "GVP11 SCSI", instance)) - goto unregister; - DMA(instance)->CNTR = GVP11_DMAC_INT_ENABLE; - num_gvp11++; - continue; - -unregister: - scsi_unregister(instance); -release: - release_mem_region(address, 256); + wh->dma_bounce_buffer = NULL; + wh->dma_bounce_len = 0; } - - return num_gvp11; } static int gvp11_bus_reset(struct scsi_cmnd *cmd) { + struct Scsi_Host *instance = cmd->device->host; + /* FIXME perform bus-specific reset */ /* FIXME 2: shouldn't we no-op this function (return FAILED), and fall back to host reset function, wd33c93_host_reset ? */ - spin_lock_irq(cmd->device->host->host_lock); + spin_lock_irq(instance->host_lock); wd33c93_host_reset(cmd); - spin_unlock_irq(cmd->device->host->host_lock); + spin_unlock_irq(instance->host_lock); return SUCCESS; } - -#define HOSTS_C - -#include "gvp11.h" - -static struct scsi_host_template driver_template = { - .proc_name = "GVP11", +static struct scsi_host_template gvp11_scsi_template = { + .module = THIS_MODULE, .name = "GVP Series II SCSI", - .detect = gvp11_detect, - .release = gvp11_release, + .proc_info = wd33c93_proc_info, + .proc_name = "GVP11", .queuecommand = wd33c93_queuecommand, .eh_abort_handler = wd33c93_abort, .eh_bus_reset_handler = gvp11_bus_reset, @@ -385,17 +203,230 @@ static struct scsi_host_template driver_template = { .use_clustering = DISABLE_CLUSTERING }; +static int __devinit check_wd33c93(struct gvp11_scsiregs *regs) +{ +#ifdef CHECK_WD33C93 + volatile unsigned char *sasr_3393, *scmd_3393; + unsigned char save_sasr; + unsigned char q, qq; -#include "scsi_module.c" + /* + * These darn GVP boards are a problem - it can be tough to tell + * whether or not they include a SCSI controller. 
This is the + * ultimate Yet-Another-GVP-Detection-Hack in that it actually + * probes for a WD33c93 chip: If we find one, it's extremely + * likely that this card supports SCSI, regardless of Product_ + * Code, Board_Size, etc. + */ + + /* Get pointers to the presumed register locations and save contents */ + + sasr_3393 = ®s->SASR; + scmd_3393 = ®s->SCMD; + save_sasr = *sasr_3393; + + /* First test the AuxStatus Reg */ + + q = *sasr_3393; /* read it */ + if (q & 0x08) /* bit 3 should always be clear */ + return -ENODEV; + *sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */ + if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*sasr_3393 != q) { /* should still read the same */ + *sasr_3393 = save_sasr; /* Oops - restore this byte */ + return -ENODEV; + } + if (*scmd_3393 != q) /* and so should the image at 0x1f */ + return -ENODEV; + + /* + * Ok, we probably have a wd33c93, but let's check a few other places + * for good measure. Make sure that this works for both 'A and 'B + * chip versions. + */ + + *sasr_3393 = WD_SCSI_STATUS; + q = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = ~q; + *sasr_3393 = WD_SCSI_STATUS; + qq = *scmd_3393; + *sasr_3393 = WD_SCSI_STATUS; + *scmd_3393 = q; + if (qq != q) /* should be read only */ + return -ENODEV; + *sasr_3393 = 0x1e; /* this register is unimplemented */ + q = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = ~q; + *sasr_3393 = 0x1e; + qq = *scmd_3393; + *sasr_3393 = 0x1e; + *scmd_3393 = q; + if (qq != q || qq != 0xff) /* should be read only, all 1's */ + return -ENODEV; + *sasr_3393 = WD_TIMEOUT_PERIOD; + q = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = ~q; + *sasr_3393 = WD_TIMEOUT_PERIOD; + qq = *scmd_3393; + *sasr_3393 = WD_TIMEOUT_PERIOD; + *scmd_3393 = q; + if (qq != (~q & 0xff)) /* should be read/write */ + return -ENODEV; +#endif /* CHECK_WD33C93 */ -int gvp11_release(struct Scsi_Host *instance) + return 0; +} + +static int __devinit gvp11_probe(struct zorro_dev *z, + const struct zorro_device_id *ent) { -#ifdef MODULE - DMA(instance)->CNTR = 0; - release_mem_region(ZTWO_PADDR(instance->base), 256); + struct Scsi_Host *instance; + unsigned long address; + int error; + unsigned int epc; + unsigned int default_dma_xfer_mask; + struct gvp11_hostdata *hdata; + struct gvp11_scsiregs *regs; + wd33c93_regs wdregs; + + default_dma_xfer_mask = ent->driver_data; + + /* + * Rumors state that some GVP ram boards use the same product + * code as the SCSI controllers. Therefore if the board-size + * is not 64KB we asume it is a ram board and bail out. 
+ */ + if (zorro_resource_len(z) != 0x10000) + return -ENODEV; + + address = z->resource.start; + if (!request_mem_region(address, 256, "wd33c93")) + return -EBUSY; + + regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); + + error = check_wd33c93(regs); + if (error) + goto fail_check_or_alloc; + + instance = scsi_host_alloc(&gvp11_scsi_template, + sizeof(struct gvp11_hostdata)); + if (!instance) { + error = -ENOMEM; + goto fail_check_or_alloc; + } + + instance->irq = IRQ_AMIGA_PORTS; + instance->unique_id = z->slotaddr; + + regs->secret2 = 1; + regs->secret1 = 0; + regs->secret3 = 15; + while (regs->CNTR & GVP11_DMAC_BUSY) + ; + regs->CNTR = 0; + regs->BANK = 0; + + wdregs.SASR = &regs->SASR; + wdregs.SCMD = &regs->SCMD; + + hdata = shost_priv(instance); + if (gvp11_xfer_mask) + hdata->wh.dma_xfer_mask = gvp11_xfer_mask; + else + hdata->wh.dma_xfer_mask = default_dma_xfer_mask; + + hdata->wh.no_sync = 0xff; + hdata->wh.fast = 0; + hdata->wh.dma_mode = CTRL_DMA; + hdata->regs = regs; + + /* + * Check for 14MHz SCSI clock + */ + epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000); + wd33c93_init(instance, wdregs, dma_setup, dma_stop, + (epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10 + : WD33C93_FS_12_15); + + error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED, + "GVP11 SCSI", instance); + if (error) + goto fail_irq; + + regs->CNTR = GVP11_DMAC_INT_ENABLE; + + error = scsi_add_host(instance, NULL); + if (error) + goto fail_host; + + zorro_set_drvdata(z, instance); + scsi_scan_host(instance); + return 0; + +fail_host: free_irq(IRQ_AMIGA_PORTS, instance); -#endif - return 1; +fail_irq: + scsi_host_put(instance); +fail_check_or_alloc: + release_mem_region(address, 256); + return error; +} + +static void __devexit gvp11_remove(struct zorro_dev *z) +{ + struct Scsi_Host *instance = zorro_get_drvdata(z); + struct gvp11_hostdata *hdata = shost_priv(instance); + + hdata->regs->CNTR = 0; + scsi_remove_host(instance); + free_irq(IRQ_AMIGA_PORTS, instance); + scsi_host_put(instance); + release_mem_region(z->resource.start, 256); +} + + /* + * This should (hopefully) be the correct way to identify + * all the different GVP SCSI controllers (except for the + * SERIES I though). 
+ */ + +static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = { + { ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff }, + { ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff }, + { ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff }, + { ZORRO_PROD_GVP_A1291, ~0x07ffffff }, + { ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff }, + { 0 } +}; +MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl); + +static struct zorro_driver gvp11_driver = { + .name = "gvp11", + .id_table = gvp11_zorro_tbl, + .probe = gvp11_probe, + .remove = __devexit_p(gvp11_remove), +}; + +static int __init gvp11_init(void) +{ + return zorro_register_driver(&gvp11_driver); +} +module_init(gvp11_init); + +static void __exit gvp11_exit(void) +{ + zorro_unregister_driver(&gvp11_driver); } +module_exit(gvp11_exit); +MODULE_DESCRIPTION("GVP Series II SCSI"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/gvp11.h b/drivers/scsi/gvp11.h index e2efdf9601ef..852913cde5dd 100644 --- a/drivers/scsi/gvp11.h +++ b/drivers/scsi/gvp11.h @@ -11,9 +11,6 @@ #include -int gvp11_detect(struct scsi_host_template *); -int gvp11_release(struct Scsi_Host *); - #ifndef CMD_PER_LUN #define CMD_PER_LUN 2 #endif @@ -22,15 +19,13 @@ int gvp11_release(struct Scsi_Host *); #define CAN_QUEUE 16 #endif -#ifndef HOSTS_C - /* * if the transfer address ANDed with this results in a non-zero * result, then we can't use DMA. */ #define GVP11_XFER_MASK (0xff000001) -typedef struct { +struct gvp11_scsiregs { unsigned char pad1[64]; volatile unsigned short CNTR; unsigned char pad2[31]; @@ -46,7 +41,7 @@ typedef struct { volatile unsigned short SP_DMA; volatile unsigned short secret2; /* store 1 here */ volatile unsigned short secret3; /* store 15 here */ -} gvp11_scsiregs; +}; /* bits in CNTR */ #define GVP11_DMAC_BUSY (1<<0) @@ -54,6 +49,4 @@ typedef struct { #define GVP11_DMAC_INT_ENABLE (1<<3) #define GVP11_DMAC_DIR_WRITE (1<<4) -#endif /* else def HOSTS_C */ - #endif /* GVP11_H */ diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 6a6661c35b2f..82ea4a8226b0 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -567,7 +567,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); @@ -576,19 +577,19 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; - if (ipr_cmd->ioa_cfg->sis64) + if (ipr_cmd->ioa_cfg->sis64) { ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); - else { + ioasa64->u.gata.status = 0; + } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioasa->u.gata.status = 0; } - ioasa->ioasc = 0; - ioasa->residual_data_len = 0; - ioasa->u.gata.status = 0; - + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; ipr_cmd->scsi_cmd = NULL; ipr_cmd->qc = NULL; ipr_cmd->sense_buffer[0] = 0; @@ -768,8 +769,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { list_del(&ipr_cmd->queue); - ipr_cmd->ioasa.ioasc = 
cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); - ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); + ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID); if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; @@ -1040,7 +1041,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, proto = cfgtew->u.cfgte64->proto; res->res_flags = cfgtew->u.cfgte64->res_flags; res->qmodel = IPR_QUEUEING_MODEL64(res); - res->type = cfgtew->u.cfgte64->res_type & 0x0f; + res->type = cfgtew->u.cfgte64->res_type; memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, sizeof(res->res_path)); @@ -1319,7 +1320,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_del(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); @@ -2354,7 +2355,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 fd_ioasc; if (ioa_cfg->sis64) @@ -4509,11 +4510,16 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, } ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); - if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) - memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata, - sizeof(struct ipr_ioasa_gata)); + if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) { + if (ipr_cmd->ioa_cfg->sis64) + memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, + sizeof(struct ipr_ioasa_gata)); + else + memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); + } LEAVE; return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); @@ -4768,7 +4774,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT); - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); /* * If the abort task timed out and we sent a bus reset, we will get @@ -4812,15 +4818,39 @@ static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) /** * ipr_handle_other_interrupt - Handle "other" interrupts * @ioa_cfg: ioa config struct - * @int_reg: interrupt register * * Return value: * IRQ_NONE / IRQ_HANDLED **/ -static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, - volatile u32 int_reg) +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg) { irqreturn_t rc = IRQ_HANDLED; + volatile u32 int_reg, int_mask_reg; + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. 
+ */ + if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + return IRQ_HANDLED; + } + } + + return IRQ_NONE; + } if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { /* Mask the interrupt */ @@ -4881,7 +4911,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) { struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; unsigned long lock_flags = 0; - volatile u32 int_reg, int_mask_reg; + volatile u32 int_reg; u32 ioasc; u16 cmd_index; int num_hrrq = 0; @@ -4896,33 +4926,6 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_NONE; } - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; - - /* If an interrupt on the adapter did not occur, ignore it. - * Or in the case of SIS 64, check for a stage change interrupt. - */ - if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { - if (ioa_cfg->sis64) { - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { - - /* clear stage change */ - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; - list_del(&ioa_cfg->reset_cmd->queue); - del_timer(&ioa_cfg->reset_cmd->timer); - ipr_reset_ioa_job(ioa_cfg->reset_cmd); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_HANDLED; - } - } - - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return IRQ_NONE; - } - while (1) { ipr_cmd = NULL; @@ -4940,7 +4943,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); @@ -4962,7 +4965,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) /* Clear the PCI interrupt */ do { writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); @@ -4977,7 +4980,7 @@ static irqreturn_t ipr_isr(int irq, void *devp) } if (unlikely(rc == IRQ_NONE)) - rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); + rc = ipr_handle_other_interrupt(ioa_cfg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return rc; @@ -5014,6 +5017,10 @@ static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, ipr_cmd->dma_use_sg = nseg; + ioarcb->data_transfer_length = cpu_to_be32(length); + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; @@ -5135,7 +5142,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = 
scsi_cmd->device->hostdata; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { scsi_cmd->result |= (DID_ERROR << 16); @@ -5166,7 +5173,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); @@ -5174,8 +5181,8 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) ioarcb->read_data_transfer_length = 0; ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; - ioasa->ioasc = 0; - ioasa->residual_data_len = 0; + ioasa->hdr.ioasc = 0; + ioasa->hdr.residual_data_len = 0; if (ipr_cmd->ioa_cfg->sis64) ioarcb->u.sis64_addr_data.data_ioadl_addr = @@ -5200,7 +5207,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) { struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { ipr_erp_done(ipr_cmd); @@ -5277,12 +5284,12 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, int i; u16 data_len; u32 ioasc, fd_ioasc; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; __be32 *ioasa_data = (__be32 *)ioasa; int error_index; - ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; - fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK; + ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK; + fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK; if (0 == ioasc) return; @@ -5297,7 +5304,7 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { /* Don't log an error if the IOA already logged one */ - if (ioasa->ilid != 0) + if (ioasa->hdr.ilid != 0) return; if (!ipr_is_gscsi(res)) @@ -5309,10 +5316,11 @@ static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error); - if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) + data_len = be16_to_cpu(ioasa->hdr.ret_stat_len); + if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len) + data_len = sizeof(struct ipr_ioasa64); + else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len) data_len = sizeof(struct ipr_ioasa); - else - data_len = be16_to_cpu(ioasa->ret_stat_len); ipr_err("IOASA Dump:\n"); @@ -5338,8 +5346,8 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) u32 failing_lba; u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - u32 ioasc = be32_to_cpu(ioasa->ioasc); + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc); memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); @@ -5382,7 +5390,7 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) /* Illegal request */ if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && - (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { + (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) { sense_buf[7] = 10; /* additional length */ /* IOARCB was in error */ @@ -5393,10 +5401,10 @@ static 
void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) sense_buf[16] = ((IPR_FIELD_POINTER_MASK & - be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; + be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff; sense_buf[17] = (IPR_FIELD_POINTER_MASK & - be32_to_cpu(ioasa->ioasc_specific)) & 0xff; + be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff; } else { if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { if (ipr_is_vset_device(res)) @@ -5428,14 +5436,20 @@ static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) **/ static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd) { - struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa; + struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64; - if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) + if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0) return 0; - memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, - min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), - SCSI_SENSE_BUFFERSIZE)); + if (ipr_cmd->ioa_cfg->sis64) + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); + else + memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data, + min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len), + SCSI_SENSE_BUFFERSIZE)); return 1; } @@ -5455,7 +5469,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; if (!res) { @@ -5547,9 +5561,9 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len)); + scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { scsi_dma_unmap(ipr_cmd->scsi_cmd); @@ -5839,19 +5853,23 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) struct ata_queued_cmd *qc = ipr_cmd->qc; struct ipr_sata_port *sata_port = qc->ap->private_data; struct ipr_resource_entry *res = sata_port->res; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); - memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata, - sizeof(struct ipr_ioasa_gata)); + if (ipr_cmd->ioa_cfg->sis64) + memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata, + sizeof(struct ipr_ioasa_gata)); + else + memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata, + sizeof(struct ipr_ioasa_gata)); ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); - if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) + if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) - qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); + qc->err_mask |= __ac_err_mask(sata_port->ioasa.status); else - qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status); + qc->err_mask |= ac_err_mask(sata_port->ioasa.status); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); ata_qc_complete(qc); } @@ -6520,7 +6538,7 @@ static void ipr_build_mode_sense(struct 
ipr_cmnd *ipr_cmd, static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); dev_err(&ioa_cfg->pdev->dev, "0x%02X failed with IOASC: 0x%08X\n", @@ -6544,7 +6562,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_set_supported_devs; @@ -6634,7 +6652,7 @@ static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd) **/ static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd) { - u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; @@ -6706,7 +6724,7 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) list_move_tail(&res->queue, &old_res); if (ioa_cfg->sis64) - entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; + entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); else entries = ioa_cfg->u.cfg_table->hdr.num_entries; @@ -6792,6 +6810,7 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; + ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff; ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; @@ -7122,7 +7141,9 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); /* sanity check the stage_time value */ - if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) + if (stage_time == 0) + stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME; + else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; @@ -7165,13 +7186,14 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; volatile u32 int_reg; + volatile u64 maskval; ENTER; ipr_cmd->job_step = ipr_ioafp_identify_hrrq; ipr_init_ioa_mem(ioa_cfg); ioa_cfg->allow_interrupts = 1; - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32); if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), @@ -7183,9 +7205,12 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) /* Enable destructive diagnostics on IOA */ writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); - writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); - if (ioa_cfg->sis64) - writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); + if (ioa_cfg->sis64) { + maskval = IPR_PCII_IPL_STAGE_CHANGE; + maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS; + writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg); + } else + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); @@ -7332,12 +7357,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) rc = 
pci_restore_state(ioa_cfg->pdev); if (rc != PCIBIOS_SUCCESSFUL) { - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); return IPR_RC_JOB_CONTINUE; } if (ipr_set_pcix_cmd_reg(ioa_cfg)) { - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); return IPR_RC_JOB_CONTINUE; } @@ -7364,7 +7389,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) } } - ENTER; + LEAVE; return IPR_RC_JOB_CONTINUE; } @@ -7406,7 +7431,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) if (rc != PCIBIOS_SUCCESSFUL) { pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev); - ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); rc = IPR_RC_JOB_CONTINUE; } else { ipr_cmd->job_step = ipr_reset_bist_done; @@ -7665,7 +7690,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; do { - ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (ioa_cfg->reset_cmd != ipr_cmd) { /* @@ -8048,13 +8073,13 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) ioarcb->u.sis64_addr_data.data_ioadl_addr = cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64)); } else { ioarcb->write_ioadl_addr = cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; ioarcb->ioasa_host_pci_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa)); } ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); ipr_cmd->cmd_index = i; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4c267b5e0b96..9ecd2259eb39 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -244,6 +244,7 @@ #define IPR_RUNTIME_RESET 0x40000000 #define IPR_IPL_INIT_MIN_STAGE_TIME 5 +#define IPR_IPL_INIT_DEFAULT_STAGE_TIME 15 #define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 #define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 #define IPR_IPL_INIT_STAGE_MASK 0xff000000 @@ -613,7 +614,7 @@ struct ipr_auto_sense { __be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)]; }; -struct ipr_ioasa { +struct ipr_ioasa_hdr { __be32 ioasc; #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) @@ -645,6 +646,25 @@ struct ipr_ioasa { #define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) #define IPR_FIELD_POINTER_MASK 0x0000ffff +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa { + struct ipr_ioasa_hdr hdr; + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + struct ipr_ioasa_gata gata; + } u; + + struct ipr_auto_sense auto_sense; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa64 { + struct ipr_ioasa_hdr hdr; + u8 fd_res_path[8]; + union { struct ipr_ioasa_vset vset; struct ipr_ioasa_af_dasd dasd; @@ -804,7 +824,7 @@ struct ipr_hostrcb_array_data_entry_enhanced { }__attribute__((packed, aligned (4))); struct ipr_hostrcb_type_ff_error { - __be32 ioa_data[502]; + __be32 ioa_data[758]; }__attribute__((packed, aligned (4))); struct ipr_hostrcb_type_01_error { @@ -1181,7 +1201,7 @@ struct 
ipr_resource_entry { u8 flags; __be16 res_flags; - __be32 type; + u8 type; u8 qmodel; struct ipr_std_inq_data std_inq_data; @@ -1464,7 +1484,10 @@ struct ipr_cmnd { struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; struct ipr_ata64_ioadl ata_ioadl; } i; - struct ipr_ioasa ioasa; + union { + struct ipr_ioasa ioasa; + struct ipr_ioasa64 ioasa64; + } s; struct list_head queue; struct scsi_cmnd *scsi_cmd; struct ata_queued_cmd *qc; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index bf55d3057413..fec47de72535 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -601,10 +601,8 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); - if (sk_sleep(sock->sk)) { - sock->sk->sk_err = EIO; - wake_up_interruptible(sk_sleep(sock->sk)); - } + sock->sk->sk_err = EIO; + wake_up_interruptible(sk_sleep(sock->sk)); iscsi_conn_stop(cls_conn, flag); iscsi_sw_tcp_release_conn(conn); diff --git a/drivers/scsi/mvme147.c b/drivers/scsi/mvme147.c index 716d1785cda7..c29d0dbb9660 100644 --- a/drivers/scsi/mvme147.c +++ b/drivers/scsi/mvme147.c @@ -16,12 +16,12 @@ #include -static struct Scsi_Host *mvme147_host = NULL; - -static irqreturn_t mvme147_intr(int irq, void *dummy) +static irqreturn_t mvme147_intr(int irq, void *data) { + struct Scsi_Host *instance = data; + if (irq == MVME147_IRQ_SCSI_PORT) - wd33c93_intr(mvme147_host); + wd33c93_intr(instance); else m147_pcc->dma_intr = 0x89; /* Ack and enable ints */ return IRQ_HANDLED; @@ -29,7 +29,8 @@ static irqreturn_t mvme147_intr(int irq, void *dummy) static int dma_setup(struct scsi_cmnd *cmd, int dir_in) { - struct WD33C93_hostdata *hdata = shost_priv(mvme147_host); + struct Scsi_Host *instance = cmd->device->host; + struct WD33C93_hostdata *hdata = shost_priv(instance); unsigned char flags = 0x01; unsigned long addr = virt_to_bus(cmd->SCp.ptr); @@ -66,6 +67,7 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt, int mvme147_detect(struct scsi_host_template *tpnt) { static unsigned char called = 0; + struct Scsi_Host *instance; wd33c93_regs regs; struct WD33C93_hostdata *hdata; @@ -76,25 +78,25 @@ int mvme147_detect(struct scsi_host_template *tpnt) tpnt->proc_name = "MVME147"; tpnt->proc_info = &wd33c93_proc_info; - mvme147_host = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); - if (!mvme147_host) + instance = scsi_register(tpnt, sizeof(struct WD33C93_hostdata)); + if (!instance) goto err_out; - mvme147_host->base = 0xfffe4000; - mvme147_host->irq = MVME147_IRQ_SCSI_PORT; + instance->base = 0xfffe4000; + instance->irq = MVME147_IRQ_SCSI_PORT; regs.SASR = (volatile unsigned char *)0xfffe4000; regs.SCMD = (volatile unsigned char *)0xfffe4001; - hdata = shost_priv(mvme147_host); + hdata = shost_priv(instance); hdata->no_sync = 0xff; hdata->fast = 0; hdata->dma_mode = CTRL_DMA; - wd33c93_init(mvme147_host, regs, dma_setup, dma_stop, WD33C93_FS_8_10); + wd33c93_init(instance, regs, dma_setup, dma_stop, WD33C93_FS_8_10); if (request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0, - "MVME147 SCSI PORT", mvme147_intr)) + "MVME147 SCSI PORT", instance)) goto err_unregister; if (request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0, - "MVME147 SCSI DMA", mvme147_intr)) + "MVME147 SCSI DMA", instance)) goto err_free_irq; #if 0 /* Disabled; causes problems booting */ m147_pcc->scsi_interrupt = 0x10; /* Assert SCSI bus reset */ @@ -113,7 +115,7 @@ int mvme147_detect(struct 
scsi_host_template *tpnt) err_free_irq: free_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr); err_unregister: - scsi_unregister(mvme147_host); + scsi_unregister(instance); err_out: return 0; } @@ -132,9 +134,6 @@ static int mvme147_bus_reset(struct scsi_cmnd *cmd) return SUCCESS; } -#define HOSTS_C - -#include "mvme147.h" static struct scsi_host_template driver_template = { .proc_name = "MVME147", diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 8dbf1c3afb7b..d64b7178fa08 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -3587,7 +3587,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name if (i == (-ENOSPC)) { transfer = STp->buffer->writing; /* FIXME -- check this logic */ if (transfer <= do_count) { - filp->f_pos += do_count - transfer; + *ppos += do_count - transfer; count -= do_count - transfer; if (STps->drv_block >= 0) { STps->drv_block += (do_count - transfer) / STp->block_size; @@ -3625,7 +3625,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name goto out; } - filp->f_pos += do_count; + *ppos += do_count; b_point += do_count; count -= do_count; if (STps->drv_block >= 0) { @@ -3647,7 +3647,7 @@ if (SRpnt) printk(KERN_ERR "%s:A: Not supposed to have SRpnt at line %d\n", name if (STps->drv_block >= 0) { STps->drv_block += blks; } - filp->f_pos += count; + *ppos += count; count = 0; } @@ -3823,7 +3823,7 @@ static ssize_t osst_read(struct file * filp, char __user * buf, size_t count, lo } STp->logical_blk_num += transfer / STp->block_size; STps->drv_block += transfer / STp->block_size; - filp->f_pos += transfer; + *ppos += transfer; buf += transfer; total += transfer; } @@ -5626,6 +5626,7 @@ static const struct file_operations osst_fops = { .open = os_scsi_tape_open, .flush = os_scsi_tape_flush, .release = os_scsi_tape_close, + .llseek = noop_llseek, }; static int osst_supports(struct scsi_device * SDp) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 9798c2c06b93..1c027a97d8b9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -492,19 +492,20 @@ void scsi_target_reap(struct scsi_target *starget) struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); unsigned long flags; enum scsi_target_state state; - int empty; + int empty = 0; spin_lock_irqsave(shost->host_lock, flags); state = starget->state; - empty = --starget->reap_ref == 0 && - list_empty(&starget->devices) ? 
1 : 0; + if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { + empty = 1; + starget->state = STARGET_DEL; + } spin_unlock_irqrestore(shost->host_lock, flags); if (!empty) return; BUG_ON(state == STARGET_DEL); - starget->state = STARGET_DEL; if (state == STARGET_CREATED) scsi_target_destroy(starget); else diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 3ea1a713ef25..24211d0efa6d 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3962,6 +3962,7 @@ static const struct file_operations st_fops = .open = st_open, .flush = st_flush, .release = st_release, + .llseek = noop_llseek, }; static int st_probe(struct device *dev) diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c index 8dc03837617b..4a789e5361a4 100644 --- a/drivers/serial/s5pv210.c +++ b/drivers/serial/s5pv210.c @@ -119,7 +119,7 @@ static int s5p_serial_probe(struct platform_device *pdev) return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]); } -static struct platform_driver s5p_serial_drv = { +static struct platform_driver s5p_serial_driver = { .probe = s5p_serial_probe, .remove = __devexit_p(s3c24xx_serial_remove), .driver = { @@ -130,19 +130,19 @@ static struct platform_driver s5p_serial_drv = { static int __init s5pv210_serial_console_init(void) { - return s3c24xx_serial_initconsole(&s5p_serial_drv, s5p_uart_inf); + return s3c24xx_serial_initconsole(&s5p_serial_driver, s5p_uart_inf); } console_initcall(s5pv210_serial_console_init); static int __init s5p_serial_init(void) { - return s3c24xx_serial_init(&s5p_serial_drv, *s5p_uart_inf); + return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf); } static void __exit s5p_serial_exit(void) { - platform_driver_unregister(&s5p_serial_drv); + platform_driver_unregister(&s5p_serial_driver); } module_init(s5p_serial_init); diff --git a/drivers/sfi/sfi_acpi.c b/drivers/sfi/sfi_acpi.c index 34aba30eb84b..f5b4ca581541 100644 --- a/drivers/sfi/sfi_acpi.c +++ b/drivers/sfi/sfi_acpi.c @@ -173,3 +173,44 @@ int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id, sfi_acpi_put_table(table); return ret; } + +static ssize_t sfi_acpi_table_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct sfi_table_attr *tbl_attr = + container_of(bin_attr, struct sfi_table_attr, attr); + struct acpi_table_header *th = NULL; + struct sfi_table_key key; + ssize_t cnt; + + key.sig = tbl_attr->name; + key.oem_id = NULL; + key.oem_table_id = NULL; + + th = sfi_acpi_get_table(&key); + if (!th) + return 0; + + cnt = memory_read_from_buffer(buf, count, &offset, + th, th->length); + sfi_acpi_put_table(th); + + return cnt; +} + + +void __init sfi_acpi_sysfs_init(void) +{ + u32 tbl_cnt, i; + struct sfi_table_attr *tbl_attr; + + tbl_cnt = XSDT_GET_NUM_ENTRIES(xsdt_va, u64); + for (i = 0; i < tbl_cnt; i++) { + tbl_attr = + sfi_sysfs_install_table(xsdt_va->table_offset_entry[i]); + tbl_attr->attr.read = sfi_acpi_table_show; + } + + return; +} diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c index b204a0929139..005195958647 100644 --- a/drivers/sfi/sfi_core.c +++ b/drivers/sfi/sfi_core.c @@ -67,6 +67,7 @@ #include #include #include +#include #include "sfi_core.h" @@ -382,6 +383,102 @@ static __init int sfi_find_syst(void) return -1; } +static struct kobject *sfi_kobj; +static struct kobject *tables_kobj; + +static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct 
sfi_table_attr *tbl_attr = + container_of(bin_attr, struct sfi_table_attr, attr); + struct sfi_table_header *th = NULL; + struct sfi_table_key key; + ssize_t cnt; + + key.sig = tbl_attr->name; + key.oem_id = NULL; + key.oem_table_id = NULL; + + if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) { + th = sfi_get_table(&key); + if (!th) + return 0; + + cnt = memory_read_from_buffer(buf, count, &offset, + th, th->len); + sfi_put_table(th); + } else + cnt = memory_read_from_buffer(buf, count, &offset, + syst_va, syst_va->header.len); + + return cnt; +} + +struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa) +{ + struct sfi_table_attr *tbl_attr; + struct sfi_table_header *th; + int ret; + + tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL); + if (!tbl_attr) + return NULL; + + th = sfi_map_table(pa); + if (!th || !th->sig[0]) { + kfree(tbl_attr); + return NULL; + } + + sysfs_attr_init(&tbl_attr->attr.attr); + memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE); + + tbl_attr->attr.size = 0; + tbl_attr->attr.read = sfi_table_show; + tbl_attr->attr.attr.name = tbl_attr->name; + tbl_attr->attr.attr.mode = 0400; + + ret = sysfs_create_bin_file(tables_kobj, + &tbl_attr->attr); + if (ret) + kfree(tbl_attr); + + sfi_unmap_table(th); + return tbl_attr; +} + +static int __init sfi_sysfs_init(void) +{ + int tbl_cnt, i; + + if (sfi_disabled) + return 0; + + sfi_kobj = kobject_create_and_add("sfi", firmware_kobj); + if (!sfi_kobj) + return 0; + + tables_kobj = kobject_create_and_add("tables", sfi_kobj); + if (!tables_kobj) { + kobject_put(sfi_kobj); + return 0; + } + + sfi_sysfs_install_table(syst_pa); + + tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64); + + for (i = 0; i < tbl_cnt; i++) + sfi_sysfs_install_table(syst_va->pentry[i]); + + sfi_acpi_sysfs_init(); + kobject_uevent(sfi_kobj, KOBJ_ADD); + kobject_uevent(tables_kobj, KOBJ_ADD); + pr_info("SFI sysfs interfaces init success\n"); + return 0; +} + void __init sfi_init(void) { if (!acpi_disabled) @@ -390,7 +487,7 @@ void __init sfi_init(void) if (sfi_disabled) return; - pr_info("Simple Firmware Interface v0.7 http://simplefirmware.org\n"); + pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n"); if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init()) disable_sfi(); @@ -414,3 +511,9 @@ void __init sfi_init_late(void) sfi_acpi_init(); } + +/* + * The reason we put it here becasue we need wait till the /sys/firmware + * is setup, then our interface can be registered in /sys/firmware/sfi + */ +core_initcall(sfi_sysfs_init); diff --git a/drivers/sfi/sfi_core.h b/drivers/sfi/sfi_core.h index da82d39e104d..b7cf220d44ec 100644 --- a/drivers/sfi/sfi_core.h +++ b/drivers/sfi/sfi_core.h @@ -61,6 +61,12 @@ struct sfi_table_key{ char *oem_table_id; }; +/* sysfs interface */ +struct sfi_table_attr { + struct bin_attribute attr; + char name[8]; +}; + #define SFI_ANY_KEY { .sig = NULL, .oem_id = NULL, .oem_table_id = NULL } extern int __init sfi_acpi_init(void); @@ -68,3 +74,5 @@ extern struct sfi_table_header *sfi_check_table(u64 paddr, struct sfi_table_key *key); struct sfi_table_header *sfi_get_table(struct sfi_table_key *key); extern void sfi_put_table(struct sfi_table_header *table); +extern struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa); +extern void __init sfi_acpi_sysfs_init(void); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 989e2752cc36..6dcda86be6eb 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -625,9 +625,12 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, 
ssb_printk(KERN_ERR PFX "No SPROM available!\n"); return -ENODEV; } - - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? - SSB_SPROM_BASE1 : SSB_SPROM_BASE31; + if (bus->chipco.dev) { /* can be unavailible! */ + bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? + SSB_SPROM_BASE1 : SSB_SPROM_BASE31; + } else { + bus->sprom_offset = SSB_SPROM_BASE1; + } buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); if (!buf) diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index 007bc3a03486..4f7cc8d13277 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c @@ -185,6 +185,7 @@ bool ssb_is_sprom_available(struct ssb_bus *bus) /* this routine differs from specs as we do not access SPROM directly on PCMCIA */ if (bus->bustype == SSB_BUSTYPE_PCI && + bus->chipco.dev && /* can be unavailible! */ bus->chipco.dev->id.revision >= 31) return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM; diff --git a/drivers/staging/go7007/saa7134-go7007.c b/drivers/staging/go7007/saa7134-go7007.c index 49f0d31c118a..cf7c34a99459 100644 --- a/drivers/staging/go7007/saa7134-go7007.c +++ b/drivers/staging/go7007/saa7134-go7007.c @@ -242,13 +242,13 @@ static void saa7134_go7007_irq_ts_done(struct saa7134_dev *dev, printk(KERN_DEBUG "saa7134-go7007: irq: lost %ld\n", (status >> 16) & 0x0f); if (status & 0x100000) { - dma_sync_single(&dev->pci->dev, - saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(&dev->pci->dev, + saa->bottom_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->bottom, PAGE_SIZE); saa_writel(SAA7134_RS_BA2(5), cpu_to_le32(saa->bottom_dma)); } else { - dma_sync_single(&dev->pci->dev, - saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(&dev->pci->dev, + saa->top_dma, PAGE_SIZE, DMA_FROM_DEVICE); go7007_parse_video_stream(go, saa->top, PAGE_SIZE); saa_writel(SAA7134_RS_BA1(5), cpu_to_le32(saa->top_dma)); } diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index 9286e863b0e7..643b413d9f0f 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -29,7 +29,6 @@ #include #include #include -#include #include "netfs.h" @@ -880,7 +879,7 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb) /* * We want fsync() to work on POHMELFS. 
*/ -static int pohmelfs_fsync(struct file *file, struct dentry *dentry, int datasync) +static int pohmelfs_fsync(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; struct writeback_control wbc = { @@ -969,13 +968,6 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr) goto err_out_exit; } - if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { - err = dquot_transfer(inode, attr); - if (err) - goto err_out_exit; - } - err = inode_setattr(inode, attr); if (err) { dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index e89304c72568..b53deee25d74 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -5879,20 +5879,13 @@ out: static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp) { IXJ_FILTER_CADENCE *lcp; - lcp = kmalloc(sizeof(IXJ_FILTER_CADENCE), GFP_KERNEL); - if (lcp == NULL) { + lcp = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE)); + if (IS_ERR(lcp)) { if(ixjdebug & 0x0001) { - printk(KERN_INFO "Could not allocate memory for cadence\n"); + printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n"); } - return -ENOMEM; + return PTR_ERR(lcp); } - if (copy_from_user(lcp, cp, sizeof(IXJ_FILTER_CADENCE))) { - if(ixjdebug & 0x0001) { - printk(KERN_INFO "Could not copy cadence to kernel\n"); - } - kfree(lcp); - return -EFAULT; - } if (lcp->filter > 5) { if(ixjdebug & 0x0001) { printk(KERN_INFO "Cadence out of range\n"); diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 6b8bf8c781c4..43abf55d8c60 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -794,7 +794,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } static int -printer_fsync(struct file *fd, struct dentry *dentry, int datasync) +printer_fsync(struct file *fd, int datasync) { struct printer_dev *dev = fd->private_data; unsigned long flags; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index aa88911c9504..0f41c9195e9b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -593,17 +593,17 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, int r; switch (ioctl) { case VHOST_NET_SET_BACKEND: - r = copy_from_user(&backend, argp, sizeof backend); - if (r < 0) - return r; + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; return vhost_net_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = VHOST_FEATURES; - return copy_to_user(featurep, &features, sizeof features); + if (copy_to_user(featurep, &features, sizeof features)) + return -EFAULT; + return 0; case VHOST_SET_FEATURES: - r = copy_from_user(&features, featurep, sizeof features); - if (r < 0) - return r; + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; if (features & ~VHOST_FEATURES) return -EOPNOTSUPP; return vhost_net_set_features(n, features); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c6fb8e968f21..3b83382e06eb 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -320,10 +320,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; unsigned long size = offsetof(struct vhost_memory, regions); - long r; - r = copy_from_user(&mem, m, size); - if (r) - return r; + if (copy_from_user(&mem, m, 
size)) + return -EFAULT; if (mem.padding) return -EOPNOTSUPP; if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) @@ -333,15 +331,16 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -ENOMEM; memcpy(newmem, &mem, size); - r = copy_from_user(newmem->regions, m->regions, - mem.nregions * sizeof *m->regions); - if (r) { + if (copy_from_user(newmem->regions, m->regions, + mem.nregions * sizeof *m->regions)) { kfree(newmem); - return r; + return -EFAULT; } - if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) + if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) { + kfree(newmem); return -EFAULT; + } oldmem = d->memory; rcu_assign_pointer(d->memory, newmem); synchronize_rcu(); @@ -374,7 +373,7 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = get_user(idx, idxp); if (r < 0) return r; - if (idx > d->nvqs) + if (idx >= d->nvqs) return -ENOBUFS; vq = d->vqs + idx; @@ -389,9 +388,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = -EBUSY; break; } - r = copy_from_user(&s, argp, sizeof s); - if (r < 0) + if (copy_from_user(&s, argp, sizeof s)) { + r = -EFAULT; break; + } if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { r = -EINVAL; break; @@ -405,9 +405,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) r = -EBUSY; break; } - r = copy_from_user(&s, argp, sizeof s); - if (r < 0) + if (copy_from_user(&s, argp, sizeof s)) { + r = -EFAULT; break; + } if (s.num > 0xffff) { r = -EINVAL; break; @@ -419,12 +420,14 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) case VHOST_GET_VRING_BASE: s.index = idx; s.num = vq->last_avail_idx; - r = copy_to_user(argp, &s, sizeof s); + if (copy_to_user(argp, &s, sizeof s)) + r = -EFAULT; break; case VHOST_SET_VRING_ADDR: - r = copy_from_user(&a, argp, sizeof a); - if (r < 0) + if (copy_from_user(&a, argp, sizeof a)) { + r = -EFAULT; break; + } if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { r = -EOPNOTSUPP; break; @@ -477,9 +480,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) vq->used = (void __user *)(unsigned long)a.used_user_addr; break; case VHOST_SET_VRING_KICK: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -492,9 +496,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) filep = eventfp; break; case VHOST_SET_VRING_CALL: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -510,9 +515,10 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) filep = eventfp; break; case VHOST_SET_VRING_ERR: - r = copy_from_user(&f, argp, sizeof f); - if (r < 0) + if (copy_from_user(&f, argp, sizeof f)) { + r = -EFAULT; break; + } eventfp = f.fd == -1 ? 
NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); @@ -575,9 +581,10 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: - r = copy_from_user(&p, argp, sizeof p); - if (r < 0) + if (copy_from_user(&p, argp, sizeof p)) { + r = -EFAULT; break; + } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 68d2518fadaa..38ffc3fbcbe4 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -222,6 +222,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) data->port = __check_device(pdata, name); if (data->port < 0) { dev_err(&pdev->dev, "wrong platform data is assigned"); + kfree(data); return -EINVAL; } @@ -266,6 +267,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) backlight_update_status(bl); return 0; out: + backlight_device_unregister(bl); kfree(data); return ret; } diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index c025c84601b0..e54a337227ea 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -8,12 +8,13 @@ menuconfig BACKLIGHT_LCD_SUPPORT Enable this to be able to choose the drivers for controlling the backlight and the LCD panel on some platforms, for example on PDAs. +if BACKLIGHT_LCD_SUPPORT + # # LCD # config LCD_CLASS_DEVICE tristate "Lowlevel LCD controls" - depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of LCD. @@ -24,31 +25,32 @@ config LCD_CLASS_DEVICE To have support for your specific LCD panel you will have to select the proper drivers which depend on this option. +if LCD_CLASS_DEVICE + config LCD_CORGI tristate "LCD Panel support for SHARP corgi/spitz model" - depends on LCD_CLASS_DEVICE && SPI_MASTER && PXA_SHARPSL + depends on SPI_MASTER && PXA_SHARPSL help Say y here to support the LCD panels usually found on SHARP corgi (C7x0) and spitz (Cxx00) models. config LCD_L4F00242T03 tristate "Epson L4F00242T03 LCD" - depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GENERIC_GPIO help SPI driver for Epson L4F00242T03. This provides basic support for init and powering the LCD up/down through a sysfs interface. config LCD_LMS283GF05 tristate "Samsung LMS283GF05 LCD" - depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GENERIC_GPIO help SPI driver for Samsung LMS283GF05. This provides basic support for powering the LCD up/down through a sysfs interface. config LCD_LTV350QV tristate "Samsung LTV350QV LCD Panel" - depends on LCD_CLASS_DEVICE && SPI_MASTER - default n + depends on SPI_MASTER help If you have a Samsung LTV350QV LCD panel, say y to include a power control driver for it. The panel starts up in power @@ -59,60 +61,61 @@ config LCD_LTV350QV config LCD_ILI9320 tristate - depends on LCD_CLASS_DEVICE && BACKLIGHT_LCD_SUPPORT - default n help If you have a panel based on the ILI9320 controller chip then say y to include a power driver for it. config LCD_TDO24M tristate "Toppoly TDO24M and TDO35S LCD Panels support" - depends on LCD_CLASS_DEVICE && SPI_MASTER - default n + depends on SPI_MASTER help If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to include the support for it. 
config LCD_VGG2432A4 tristate "VGG2432A4 LCM device support" - depends on BACKLIGHT_LCD_SUPPORT && LCD_CLASS_DEVICE && SPI_MASTER + depends on SPI_MASTER select LCD_ILI9320 - default n help If you have a VGG2432A4 panel based on the ILI9320 controller chip then say y to include a power driver for it. config LCD_PLATFORM tristate "Platform LCD controls" - depends on LCD_CLASS_DEVICE help This driver provides a platform-device registered LCD power control interface. config LCD_TOSA tristate "Sharp SL-6000 LCD Driver" - depends on LCD_CLASS_DEVICE && SPI - depends on MACH_TOSA - default n + depends on SPI && MACH_TOSA help If you have an Sharp SL-6000 Zaurus say Y to enable a driver for its LCD. config LCD_HP700 tristate "HP Jornada 700 series LCD Driver" - depends on LCD_CLASS_DEVICE depends on SA1100_JORNADA720_SSP && !PREEMPT default y help If you have an HP Jornada 700 series handheld (710/720/728) say Y to enable LCD control driver. +config LCD_S6E63M0 + tristate "S6E63M0 AMOLED LCD Driver" + depends on SPI && BACKLIGHT_CLASS_DEVICE + default n + help + If you have an S6E63M0 LCD Panel, say Y to enable its + LCD control driver. + +endif # LCD_CLASS_DEVICE + # # Backlight # config BACKLIGHT_CLASS_DEVICE tristate "Lowlevel Backlight controls" - depends on BACKLIGHT_LCD_SUPPORT default m help This framework adds support for low-level control of the LCD @@ -121,9 +124,11 @@ config BACKLIGHT_CLASS_DEVICE To have support for your specific LCD panel you will have to select the proper drivers which depend on this option. +if BACKLIGHT_CLASS_DEVICE + config BACKLIGHT_ATMEL_LCDC bool "Atmel LCDC Contrast-as-Backlight control" - depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL + depends on FB_ATMEL default y if MACH_SAM9261EK || MACH_SAM9G10EK || MACH_SAM9263EK help This provides a backlight control internal to the Atmel LCDC @@ -136,8 +141,7 @@ config BACKLIGHT_ATMEL_LCDC config BACKLIGHT_ATMEL_PWM tristate "Atmel PWM backlight control" - depends on BACKLIGHT_CLASS_DEVICE && ATMEL_PWM - default n + depends on ATMEL_PWM help Say Y here if you want to use the PWM peripheral in Atmel AT91 and AVR32 devices. This driver will need additional platform data to know @@ -146,9 +150,18 @@ config BACKLIGHT_ATMEL_PWM To compile this driver as a module, choose M here: the module will be called atmel-pwm-bl. +config BACKLIGHT_EP93XX + tristate "Cirrus EP93xx Backlight Driver" + depends on FB_EP93XX + help + If you have a LCD backlight connected to the BRIGHT output of + the EP93xx, say Y here to enable this driver. + + To compile this driver as a module, choose M here: the module will + be called ep93xx_bl. 
+ config BACKLIGHT_GENERIC tristate "Generic (aka Sharp Corgi) Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE default y help Say y to enable the generic platform backlight driver previously @@ -157,7 +170,7 @@ config BACKLIGHT_GENERIC config BACKLIGHT_LOCOMO tristate "Sharp LOCOMO LCD/Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && SHARP_LOCOMO + depends on SHARP_LOCOMO default y help If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to @@ -165,7 +178,7 @@ config BACKLIGHT_LOCOMO config BACKLIGHT_OMAP1 tristate "OMAP1 PWL-based LCD Backlight" - depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1 + depends on ARCH_OMAP1 default y help This driver controls the LCD backlight level and power for @@ -174,7 +187,7 @@ config BACKLIGHT_OMAP1 config BACKLIGHT_HP680 tristate "HP Jornada 680 Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX + depends on SH_HP6XX default y help If you have a HP Jornada 680, say y to enable the @@ -182,7 +195,6 @@ config BACKLIGHT_HP680 config BACKLIGHT_HP700 tristate "HP Jornada 700 series Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE depends on SA1100_JORNADA720_SSP && !PREEMPT default y help @@ -191,76 +203,70 @@ config BACKLIGHT_HP700 config BACKLIGHT_PROGEAR tristate "Frontpath ProGear Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && PCI && X86 - default n + depends on PCI && X86 help If you have a Frontpath ProGear say Y to enable the backlight driver. config BACKLIGHT_CARILLO_RANCH tristate "Intel Carillo Ranch Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578 - default n + depends on LCD_CLASS_DEVICE && PCI && X86 && FB_LE80578 help If you have a Intel LE80578 (Carillo Ranch) say Y to enable the backlight driver. config BACKLIGHT_PWM tristate "Generic PWM based Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && HAVE_PWM + depends on HAVE_PWM help If you have a LCD backlight adjustable by PWM, say Y to enable this driver. config BACKLIGHT_DA903X tristate "Backlight Driver for DA9030/DA9034 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && PMIC_DA903X + depends on PMIC_DA903X help If you have a LCD backlight connected to the WLED output of DA9030 or DA9034 WLED output, say Y here to enable this driver. config BACKLIGHT_MAX8925 tristate "Backlight driver for MAX8925" - depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925 + depends on MFD_MAX8925 help If you have a LCD backlight connected to the WLED output of MAX8925 WLED output, say Y here to enable this driver. config BACKLIGHT_MBP_NVIDIA tristate "MacBook Pro Nvidia Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && X86 - default n + depends on X86 help If you have an Apple Macbook Pro with Nvidia graphics hardware say Y to enable a driver for its backlight config BACKLIGHT_TOSA tristate "Sharp SL-6000 Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && I2C - depends on MACH_TOSA && LCD_TOSA - default n + depends on I2C && MACH_TOSA && LCD_TOSA help If you have an Sharp SL-6000 Zaurus say Y to enable a driver for its backlight config BACKLIGHT_SAHARA tristate "Tabletkiosk Sahara Touch-iT Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && X86 - default n + depends on X86 help If you have a Tabletkiosk Sahara Touch-iT, say y to enable the backlight driver. 
config BACKLIGHT_WM831X tristate "WM831x PMIC Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X + depends on MFD_WM831X help If you have a backlight driven by the ISINK and DCDC of a WM831x PMIC say y to enable the backlight driver for it. config BACKLIGHT_ADX tristate "Avionic Design Xanthos Backlight Driver" - depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX + depends on ARCH_PXA_ADX default y help Say Y to enable the backlight driver on Avionic Design Xanthos-based @@ -268,7 +274,7 @@ config BACKLIGHT_ADX config BACKLIGHT_ADP5520 tristate "Backlight Driver for ADP5520/ADP5501 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520 + depends on PMIC_ADP5520 help If you have a LCD backlight connected to the BST/BL_SNK output of ADP5520 or ADP5501, say Y here to enable this driver. @@ -276,9 +282,31 @@ config BACKLIGHT_ADP5520 To compile this driver as a module, choose M here: the module will be called adp5520_bl. +config BACKLIGHT_ADP8860 + tristate "Backlight Driver for ADP8860/ADP8861/ADP8863 using WLED" + depends on BACKLIGHT_CLASS_DEVICE && I2C + select NEW_LEDS + select LEDS_CLASS + help + If you have a LCD backlight connected to the ADP8860, ADP8861 or + ADP8863 say Y here to enable this driver. + + To compile this driver as a module, choose M here: the module will + be called adp8860_bl. + config BACKLIGHT_88PM860X tristate "Backlight Driver for 88PM8606 using WLED" - depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X + depends on MFD_88PM860X help Say Y to enable the backlight driver for Marvell 88PM8606. +config BACKLIGHT_PCF50633 + tristate "Backlight driver for NXP PCF50633 MFD" + depends on BACKLIGHT_CLASS_DEVICE && MFD_PCF50633 + help + If you have a backlight driven by a NXP PCF50633 MFD, say Y here to + enable its driver. + +endif # BACKLIGHT_CLASS_DEVICE + +endif # BACKLIGHT_LCD_SUPPORT diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 09d1f14d6257..44c0f81ad85d 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -11,9 +11,11 @@ obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o obj-$(CONFIG_LCD_TDO24M) += tdo24m.o obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o +obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o +obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o @@ -30,5 +32,7 @@ obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o +obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o +obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c new file mode 100644 index 000000000000..921ca37398f3 --- /dev/null +++ b/drivers/video/backlight/adp8860_bl.c @@ -0,0 +1,838 @@ +/* + * Backlight driver for Analog Devices ADP8860 Backlight Devices + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#define ADP8860_EXT_FEATURES +#define ADP8860_USE_LEDS + +#define ADP8860_MFDVID 0x00 /* Manufacturer and device ID */ +#define ADP8860_MDCR 0x01 /* Device mode and status */ +#define ADP8860_MDCR2 0x02 /* Device mode and Status Register 2 */ +#define ADP8860_INTR_EN 0x03 /* Interrupts enable */ +#define ADP8860_CFGR 0x04 /* Configuration register */ +#define ADP8860_BLSEN 0x05 /* Sink enable backlight or independent */ +#define ADP8860_BLOFF 0x06 /* Backlight off timeout */ +#define ADP8860_BLDIM 0x07 /* Backlight dim timeout */ +#define ADP8860_BLFR 0x08 /* Backlight fade in and out rates */ +#define ADP8860_BLMX1 0x09 /* Backlight (Brightness Level 1-daylight) maximum current */ +#define ADP8860_BLDM1 0x0A /* Backlight (Brightness Level 1-daylight) dim current */ +#define ADP8860_BLMX2 0x0B /* Backlight (Brightness Level 2-office) maximum current */ +#define ADP8860_BLDM2 0x0C /* Backlight (Brightness Level 2-office) dim current */ +#define ADP8860_BLMX3 0x0D /* Backlight (Brightness Level 3-dark) maximum current */ +#define ADP8860_BLDM3 0x0E /* Backlight (Brightness Level 3-dark) dim current */ +#define ADP8860_ISCFR 0x0F /* Independent sink current fade control register */ +#define ADP8860_ISCC 0x10 /* Independent sink current control register */ +#define ADP8860_ISCT1 0x11 /* Independent Sink Current Timer Register LED[7:5] */ +#define ADP8860_ISCT2 0x12 /* Independent Sink Current Timer Register LED[4:1] */ +#define ADP8860_ISCF 0x13 /* Independent sink current fade register */ +#define ADP8860_ISC7 0x14 /* Independent Sink Current LED7 */ +#define ADP8860_ISC6 0x15 /* Independent Sink Current LED6 */ +#define ADP8860_ISC5 0x16 /* Independent Sink Current LED5 */ +#define ADP8860_ISC4 0x17 /* Independent Sink Current LED4 */ +#define ADP8860_ISC3 0x18 /* Independent Sink Current LED3 */ +#define ADP8860_ISC2 0x19 /* Independent Sink Current LED2 */ +#define ADP8860_ISC1 0x1A /* Independent Sink Current LED1 */ +#define ADP8860_CCFG 0x1B /* Comparator configuration */ +#define ADP8860_CCFG2 0x1C /* Second comparator configuration */ +#define ADP8860_L2_TRP 0x1D /* L2 comparator reference */ +#define ADP8860_L2_HYS 0x1E /* L2 hysteresis */ +#define ADP8860_L3_TRP 0x1F /* L3 comparator reference */ +#define ADP8860_L3_HYS 0x20 /* L3 hysteresis */ +#define ADP8860_PH1LEVL 0x21 /* First phototransistor ambient light level-low byte register */ +#define ADP8860_PH1LEVH 0x22 /* First phototransistor ambient light level-high byte register */ +#define ADP8860_PH2LEVL 0x23 /* Second phototransistor ambient light level-low byte register */ +#define ADP8860_PH2LEVH 0x24 /* Second phototransistor ambient light level-high byte register */ + +#define ADP8860_MANUFID 0x0 /* Analog Devices ADP8860 Manufacturer ID */ +#define ADP8861_MANUFID 0x4 /* Analog Devices ADP8861 Manufacturer ID */ +#define ADP8863_MANUFID 0x2 /* Analog Devices ADP8863 Manufacturer ID */ + +#define ADP8860_DEVID(x) ((x) & 0xF) +#define ADP8860_MANID(x) ((x) >> 4) + +/* MDCR Device mode and status */ +#define INT_CFG (1 << 6) +#define NSTBY (1 << 5) +#define DIM_EN (1 << 4) +#define GDWN_DIS (1 << 3) +#define SIS_EN (1 << 2) +#define CMP_AUTOEN (1 << 1) +#define BLEN (1 << 0) + +/* ADP8860_CCFG Main ALS comparator level enable */ +#define L3_EN (1 << 1) +#define L2_EN (1 << 0) + +#define CFGR_BLV_SHIFT 3 +#define CFGR_BLV_MASK 0x3 +#define ADP8860_FLAG_LED_MASK 0xFF + +#define FADE_VAL(in, 
out) ((0xF & (in)) | ((0xF & (out)) << 4)) +#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1)) +#define ALS_CCFG_VAL(filt) ((0x7 & filt) << 5) + +enum { + adp8860, + adp8861, + adp8863 +}; + +struct adp8860_led { + struct led_classdev cdev; + struct work_struct work; + struct i2c_client *client; + enum led_brightness new_brightness; + int id; + int flags; +}; + +struct adp8860_bl { + struct i2c_client *client; + struct backlight_device *bl; + struct adp8860_led *led; + struct adp8860_backlight_platform_data *pdata; + struct mutex lock; + unsigned long cached_daylight_max; + int id; + int revid; + int current_brightness; + unsigned en_ambl_sens:1; + unsigned gdwn_dis:1; +}; + +static int adp8860_read(struct i2c_client *client, int reg, uint8_t *val) +{ + int ret; + + ret = i2c_smbus_read_byte_data(client, reg); + if (ret < 0) { + dev_err(&client->dev, "failed reading at 0x%02x\n", reg); + return ret; + } + + *val = (uint8_t)ret; + return 0; +} + +static int adp8860_write(struct i2c_client *client, u8 reg, u8 val) +{ + return i2c_smbus_write_byte_data(client, reg, val); +} + +static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + uint8_t reg_val; + int ret; + + mutex_lock(&data->lock); + + ret = adp8860_read(client, reg, ®_val); + + if (!ret && ((reg_val & bit_mask) == 0)) { + reg_val |= bit_mask; + ret = adp8860_write(client, reg, reg_val); + } + + mutex_unlock(&data->lock); + return ret; +} + +static int adp8860_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + uint8_t reg_val; + int ret; + + mutex_lock(&data->lock); + + ret = adp8860_read(client, reg, ®_val); + + if (!ret && (reg_val & bit_mask)) { + reg_val &= ~bit_mask; + ret = adp8860_write(client, reg, reg_val); + } + + mutex_unlock(&data->lock); + return ret; +} + +/* + * Independent sink / LED + */ +#if defined(ADP8860_USE_LEDS) +static void adp8860_led_work(struct work_struct *work) +{ + struct adp8860_led *led = container_of(work, struct adp8860_led, work); + adp8860_write(led->client, ADP8860_ISC1 - led->id + 1, + led->new_brightness >> 1); +} + +static void adp8860_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct adp8860_led *led; + + led = container_of(led_cdev, struct adp8860_led, cdev); + led->new_brightness = value; + schedule_work(&led->work); +} + +static int adp8860_led_setup(struct adp8860_led *led) +{ + struct i2c_client *client = led->client; + int ret = 0; + + ret = adp8860_write(client, ADP8860_ISC1 - led->id + 1, 0); + ret |= adp8860_set_bits(client, ADP8860_ISCC, 1 << (led->id - 1)); + + if (led->id > 4) + ret |= adp8860_set_bits(client, ADP8860_ISCT1, + (led->flags & 0x3) << ((led->id - 5) * 2)); + else + ret |= adp8860_set_bits(client, ADP8860_ISCT2, + (led->flags & 0x3) << ((led->id - 1) * 2)); + + return ret; +} + +static int __devinit adp8860_led_probe(struct i2c_client *client) +{ + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct adp8860_bl *data = i2c_get_clientdata(client); + struct adp8860_led *led, *led_dat; + struct led_info *cur_led; + int ret, i; + + led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); + if (led == NULL) { + dev_err(&client->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + + ret = adp8860_write(client, ADP8860_ISCFR, pdata->led_fade_law); + ret = adp8860_write(client, ADP8860_ISCT1, + 
(pdata->led_on_time & 0x3) << 6); + ret |= adp8860_write(client, ADP8860_ISCF, + FADE_VAL(pdata->led_fade_in, pdata->led_fade_out)); + + if (ret) { + dev_err(&client->dev, "failed to write\n"); + goto err_free; + } + + for (i = 0; i < pdata->num_leds; ++i) { + cur_led = &pdata->leds[i]; + led_dat = &led[i]; + + led_dat->id = cur_led->flags & ADP8860_FLAG_LED_MASK; + + if (led_dat->id > 7 || led_dat->id < 1) { + dev_err(&client->dev, "Invalid LED ID %d\n", + led_dat->id); + goto err; + } + + if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) { + dev_err(&client->dev, "LED %d used by Backlight\n", + led_dat->id); + goto err; + } + + led_dat->cdev.name = cur_led->name; + led_dat->cdev.default_trigger = cur_led->default_trigger; + led_dat->cdev.brightness_set = adp8860_led_set; + led_dat->cdev.brightness = LED_OFF; + led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT; + led_dat->client = client; + led_dat->new_brightness = LED_OFF; + INIT_WORK(&led_dat->work, adp8860_led_work); + + ret = led_classdev_register(&client->dev, &led_dat->cdev); + if (ret) { + dev_err(&client->dev, "failed to register LED %d\n", + led_dat->id); + goto err; + } + + ret = adp8860_led_setup(led_dat); + if (ret) { + dev_err(&client->dev, "failed to write\n"); + i++; + goto err; + } + } + + data->led = led; + + return 0; + + err: + for (i = i - 1; i >= 0; --i) { + led_classdev_unregister(&led[i].cdev); + cancel_work_sync(&led[i].work); + } + + err_free: + kfree(led); + + return ret; +} + +static int __devexit adp8860_led_remove(struct i2c_client *client) +{ + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct adp8860_bl *data = i2c_get_clientdata(client); + int i; + + for (i = 0; i < pdata->num_leds; i++) { + led_classdev_unregister(&data->led[i].cdev); + cancel_work_sync(&data->led[i].work); + } + + kfree(data->led); + return 0; +} +#else +static int __devinit adp8860_led_probe(struct i2c_client *client) +{ + return 0; +} + +static int __devexit adp8860_led_remove(struct i2c_client *client) +{ + return 0; +} +#endif + +static int adp8860_bl_set(struct backlight_device *bl, int brightness) +{ + struct adp8860_bl *data = bl_get_data(bl); + struct i2c_client *client = data->client; + int ret = 0; + + if (data->en_ambl_sens) { + if ((brightness > 0) && (brightness < ADP8860_MAX_BRIGHTNESS)) { + /* Disable Ambient Light auto adjust */ + ret |= adp8860_clr_bits(client, ADP8860_MDCR, + CMP_AUTOEN); + ret |= adp8860_write(client, ADP8860_BLMX1, brightness); + } else { + /* + * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust + * restore daylight l1 sysfs brightness + */ + ret |= adp8860_write(client, ADP8860_BLMX1, + data->cached_daylight_max); + ret |= adp8860_set_bits(client, ADP8860_MDCR, + CMP_AUTOEN); + } + } else + ret |= adp8860_write(client, ADP8860_BLMX1, brightness); + + if (data->current_brightness && brightness == 0) + ret |= adp8860_set_bits(client, + ADP8860_MDCR, DIM_EN); + else if (data->current_brightness == 0 && brightness) + ret |= adp8860_clr_bits(client, + ADP8860_MDCR, DIM_EN); + + if (!ret) + data->current_brightness = brightness; + + return ret; +} + +static int adp8860_bl_update_status(struct backlight_device *bl) +{ + int brightness = bl->props.brightness; + if (bl->props.power != FB_BLANK_UNBLANK) + brightness = 0; + + if (bl->props.fb_blank != FB_BLANK_UNBLANK) + brightness = 0; + + return adp8860_bl_set(bl, brightness); +} + +static int adp8860_bl_get_brightness(struct backlight_device *bl) +{ + struct adp8860_bl *data = bl_get_data(bl); + + return 
data->current_brightness; +} + +static const struct backlight_ops adp8860_bl_ops = { + .update_status = adp8860_bl_update_status, + .get_brightness = adp8860_bl_get_brightness, +}; + +static int adp8860_bl_setup(struct backlight_device *bl) +{ + struct adp8860_bl *data = bl_get_data(bl); + struct i2c_client *client = data->client; + struct adp8860_backlight_platform_data *pdata = data->pdata; + int ret = 0; + + ret |= adp8860_write(client, ADP8860_BLSEN, ~pdata->bl_led_assign); + ret |= adp8860_write(client, ADP8860_BLMX1, pdata->l1_daylight_max); + ret |= adp8860_write(client, ADP8860_BLDM1, pdata->l1_daylight_dim); + + if (data->en_ambl_sens) { + data->cached_daylight_max = pdata->l1_daylight_max; + ret |= adp8860_write(client, ADP8860_BLMX2, + pdata->l2_office_max); + ret |= adp8860_write(client, ADP8860_BLDM2, + pdata->l2_office_dim); + ret |= adp8860_write(client, ADP8860_BLMX3, + pdata->l3_dark_max); + ret |= adp8860_write(client, ADP8860_BLDM3, + pdata->l3_dark_dim); + + ret |= adp8860_write(client, ADP8860_L2_TRP, pdata->l2_trip); + ret |= adp8860_write(client, ADP8860_L2_HYS, pdata->l2_hyst); + ret |= adp8860_write(client, ADP8860_L3_TRP, pdata->l3_trip); + ret |= adp8860_write(client, ADP8860_L3_HYS, pdata->l3_hyst); + ret |= adp8860_write(client, ADP8860_CCFG, L2_EN | L3_EN | + ALS_CCFG_VAL(pdata->abml_filt)); + } + + ret |= adp8860_write(client, ADP8860_CFGR, + BL_CFGR_VAL(pdata->bl_fade_law, 0)); + + ret |= adp8860_write(client, ADP8860_BLFR, FADE_VAL(pdata->bl_fade_in, + pdata->bl_fade_out)); + + ret |= adp8860_set_bits(client, ADP8860_MDCR, BLEN | DIM_EN | NSTBY | + (data->gdwn_dis ? GDWN_DIS : 0)); + + return ret; +} + +static ssize_t adp8860_show(struct device *dev, char *buf, int reg) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, reg, ®_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + return sprintf(buf, "%u\n", reg_val); +} + +static ssize_t adp8860_store(struct device *dev, const char *buf, + size_t count, int reg) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + mutex_lock(&data->lock); + adp8860_write(data->client, reg, val); + mutex_unlock(&data->lock); + + return count; +} + +static ssize_t adp8860_bl_l3_dark_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX3); +} + +static ssize_t adp8860_bl_l3_dark_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLMX3); +} + +static DEVICE_ATTR(l3_dark_max, 0664, adp8860_bl_l3_dark_max_show, + adp8860_bl_l3_dark_max_store); + +static ssize_t adp8860_bl_l2_office_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX2); +} + +static ssize_t adp8860_bl_l2_office_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLMX2); +} +static DEVICE_ATTR(l2_office_max, 0664, adp8860_bl_l2_office_max_show, + adp8860_bl_l2_office_max_store); + +static ssize_t adp8860_bl_l1_daylight_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLMX1); +} + +static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev, + struct 
device_attribute *attr, const char *buf, size_t count) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + + strict_strtoul(buf, 10, &data->cached_daylight_max); + return adp8860_store(dev, buf, count, ADP8860_BLMX1); +} +static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, + adp8860_bl_l1_daylight_max_store); + +static ssize_t adp8860_bl_l3_dark_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM3); +} + +static ssize_t adp8860_bl_l3_dark_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM3); +} +static DEVICE_ATTR(l3_dark_dim, 0664, adp8860_bl_l3_dark_dim_show, + adp8860_bl_l3_dark_dim_store); + +static ssize_t adp8860_bl_l2_office_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM2); +} + +static ssize_t adp8860_bl_l2_office_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM2); +} +static DEVICE_ATTR(l2_office_dim, 0664, adp8860_bl_l2_office_dim_show, + adp8860_bl_l2_office_dim_store); + +static ssize_t adp8860_bl_l1_daylight_dim_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return adp8860_show(dev, buf, ADP8860_BLDM1); +} + +static ssize_t adp8860_bl_l1_daylight_dim_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + return adp8860_store(dev, buf, count, ADP8860_BLDM1); +} +static DEVICE_ATTR(l1_daylight_dim, 0664, adp8860_bl_l1_daylight_dim_show, + adp8860_bl_l1_daylight_dim_store); + +#ifdef ADP8860_EXT_FEATURES +static ssize_t adp8860_bl_ambient_light_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + uint16_t ret_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, ADP8860_PH1LEVL, ®_val); + ret_val = reg_val; + error |= adp8860_read(data->client, ADP8860_PH1LEVH, ®_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + /* Return 13-bit conversion value for the first light sensor */ + ret_val += (reg_val & 0x1F) << 8; + + return sprintf(buf, "%u\n", ret_val); +} +static DEVICE_ATTR(ambient_light_level, 0444, + adp8860_bl_ambient_light_level_show, NULL); + +static ssize_t adp8860_bl_ambient_light_zone_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + int error; + uint8_t reg_val; + + mutex_lock(&data->lock); + error = adp8860_read(data->client, ADP8860_CFGR, ®_val); + mutex_unlock(&data->lock); + + if (error < 0) + return error; + + return sprintf(buf, "%u\n", + ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1); +} + +static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adp8860_bl *data = dev_get_drvdata(dev); + unsigned long val; + uint8_t reg_val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + return ret; + + if (val == 0) { + /* Enable automatic ambient light sensing */ + adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); + } else if ((val > 0) && (val < 6)) { + /* Disable automatic ambient light sensing */ + adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); + + /* Set user supplied ambient light zone */ 
+ mutex_lock(&data->lock); + adp8860_read(data->client, ADP8860_CFGR, ®_val); + reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); + reg_val |= val << CFGR_BLV_SHIFT; + adp8860_write(data->client, ADP8860_CFGR, reg_val); + mutex_unlock(&data->lock); + } + + return count; +} +static DEVICE_ATTR(ambient_light_zone, 0664, + adp8860_bl_ambient_light_zone_show, + adp8860_bl_ambient_light_zone_store); +#endif + +static struct attribute *adp8860_bl_attributes[] = { + &dev_attr_l3_dark_max.attr, + &dev_attr_l3_dark_dim.attr, + &dev_attr_l2_office_max.attr, + &dev_attr_l2_office_dim.attr, + &dev_attr_l1_daylight_max.attr, + &dev_attr_l1_daylight_dim.attr, +#ifdef ADP8860_EXT_FEATURES + &dev_attr_ambient_light_level.attr, + &dev_attr_ambient_light_zone.attr, +#endif + NULL +}; + +static const struct attribute_group adp8860_bl_attr_group = { + .attrs = adp8860_bl_attributes, +}; + +static int __devinit adp8860_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct backlight_device *bl; + struct adp8860_bl *data; + struct adp8860_backlight_platform_data *pdata = + client->dev.platform_data; + struct backlight_properties props; + uint8_t reg_val; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); + return -EIO; + } + + if (!pdata) { + dev_err(&client->dev, "no platform data?\n"); + return -EINVAL; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + ret = adp8860_read(client, ADP8860_MFDVID, ®_val); + if (ret < 0) + goto out2; + + switch (ADP8860_MANID(reg_val)) { + case ADP8863_MANUFID: + data->gdwn_dis = !!pdata->gdwn_dis; + case ADP8860_MANUFID: + data->en_ambl_sens = !!pdata->en_ambl_sens; + break; + case ADP8861_MANUFID: + data->gdwn_dis = !!pdata->gdwn_dis; + break; + default: + dev_err(&client->dev, "failed to probe\n"); + ret = -ENODEV; + goto out2; + } + + /* It's confirmed that the DEVID field is actually a REVID */ + + data->revid = ADP8860_DEVID(reg_val); + data->client = client; + data->pdata = pdata; + data->id = id->driver_data; + data->current_brightness = 0; + i2c_set_clientdata(client, data); + + memset(&props, 0, sizeof(props)); + props.max_brightness = ADP8860_MAX_BRIGHTNESS; + + mutex_init(&data->lock); + + bl = backlight_device_register(dev_driver_string(&client->dev), + &client->dev, data, &adp8860_bl_ops, &props); + if (IS_ERR(bl)) { + dev_err(&client->dev, "failed to register backlight\n"); + ret = PTR_ERR(bl); + goto out2; + } + + bl->props.max_brightness = + bl->props.brightness = ADP8860_MAX_BRIGHTNESS; + + data->bl = bl; + + if (data->en_ambl_sens) + ret = sysfs_create_group(&bl->dev.kobj, + &adp8860_bl_attr_group); + + if (ret) { + dev_err(&client->dev, "failed to register sysfs\n"); + goto out1; + } + + ret = adp8860_bl_setup(bl); + if (ret) { + ret = -EIO; + goto out; + } + + backlight_update_status(bl); + + dev_info(&client->dev, "%s Rev.%d Backlight\n", + client->name, data->revid); + + if (pdata->num_leds) + adp8860_led_probe(client); + + return 0; + +out: + if (data->en_ambl_sens) + sysfs_remove_group(&data->bl->dev.kobj, + &adp8860_bl_attr_group); +out1: + backlight_device_unregister(bl); +out2: + i2c_set_clientdata(client, NULL); + kfree(data); + + return ret; +} + +static int __devexit adp8860_remove(struct i2c_client *client) +{ + struct adp8860_bl *data = i2c_get_clientdata(client); + + adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); + + if (data->led) + adp8860_led_remove(client); + + if 
(data->en_ambl_sens) + sysfs_remove_group(&data->bl->dev.kobj, + &adp8860_bl_attr_group); + + backlight_device_unregister(data->bl); + i2c_set_clientdata(client, NULL); + kfree(data); + + return 0; +} + +#ifdef CONFIG_PM +static int adp8860_i2c_suspend(struct i2c_client *client, pm_message_t message) +{ + adp8860_clr_bits(client, ADP8860_MDCR, NSTBY); + + return 0; +} + +static int adp8860_i2c_resume(struct i2c_client *client) +{ + adp8860_set_bits(client, ADP8860_MDCR, NSTBY); + + return 0; +} +#else +#define adp8860_i2c_suspend NULL +#define adp8860_i2c_resume NULL +#endif + +static const struct i2c_device_id adp8860_id[] = { + { "adp8860", adp8860 }, + { "adp8861", adp8861 }, + { "adp8863", adp8863 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, adp8860_id); + +static struct i2c_driver adp8860_driver = { + .driver = { + .name = KBUILD_MODNAME, + }, + .probe = adp8860_probe, + .remove = __devexit_p(adp8860_remove), + .suspend = adp8860_i2c_suspend, + .resume = adp8860_i2c_resume, + .id_table = adp8860_id, +}; + +static int __init adp8860_init(void) +{ + return i2c_add_driver(&adp8860_driver); +} +module_init(adp8860_init); + +static void __exit adp8860_exit(void) +{ + i2c_del_driver(&adp8860_driver); +} +module_exit(adp8860_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Michael Hennerich "); +MODULE_DESCRIPTION("ADP8860 Backlight driver"); +MODULE_ALIAS("i2c:adp8860-backlight"); diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c index 7f4a7c30a98b..fe9af129c5dd 100644 --- a/drivers/video/backlight/adx_bl.c +++ b/drivers/video/backlight/adx_bl.c @@ -107,8 +107,8 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev) props.max_brightness = 0xff; bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl, &adx_backlight_ops, &props); - if (!bldev) { - ret = -ENOMEM; + if (IS_ERR(bldev)) { + ret = PTR_ERR(bldev); goto out; } diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c new file mode 100644 index 000000000000..b0cc49184803 --- /dev/null +++ b/drivers/video/backlight/ep93xx_bl.c @@ -0,0 +1,160 @@ +/* + * Driver for the Cirrus EP93xx lcd backlight + * + * Copyright (c) 2010 H Hartley Sweeten + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This driver controls the pulse width modulated brightness control output, + * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. 
+ */ + + +#include +#include +#include +#include + +#include + +#define EP93XX_RASTER_REG(x) (EP93XX_RASTER_BASE + (x)) +#define EP93XX_RASTER_BRIGHTNESS EP93XX_RASTER_REG(0x20) + +#define EP93XX_MAX_COUNT 255 +#define EP93XX_MAX_BRIGHT 255 +#define EP93XX_DEF_BRIGHT 128 + +struct ep93xxbl { + void __iomem *mmio; + int brightness; +}; + +static int ep93xxbl_set(struct backlight_device *bl, int brightness) +{ + struct ep93xxbl *ep93xxbl = bl_get_data(bl); + + __raw_writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio); + + ep93xxbl->brightness = brightness; + + return 0; +} + +static int ep93xxbl_update_status(struct backlight_device *bl) +{ + int brightness = bl->props.brightness; + + if (bl->props.power != FB_BLANK_UNBLANK || + bl->props.fb_blank != FB_BLANK_UNBLANK) + brightness = 0; + + return ep93xxbl_set(bl, brightness); +} + +static int ep93xxbl_get_brightness(struct backlight_device *bl) +{ + struct ep93xxbl *ep93xxbl = bl_get_data(bl); + + return ep93xxbl->brightness; +} + +static const struct backlight_ops ep93xxbl_ops = { + .update_status = ep93xxbl_update_status, + .get_brightness = ep93xxbl_get_brightness, +}; + +static int __init ep93xxbl_probe(struct platform_device *dev) +{ + struct ep93xxbl *ep93xxbl; + struct backlight_device *bl; + struct backlight_properties props; + + ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL); + if (!ep93xxbl) + return -ENOMEM; + + /* + * This register is located in the range already ioremap'ed by + * the framebuffer driver. A MFD driver seems a bit of overkill + * to handle this so use the static I/O mapping; this address + * is already virtual. + * + * NOTE: No locking is required; the framebuffer does not touch + * this register. + */ + ep93xxbl->mmio = EP93XX_RASTER_BRIGHTNESS; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = EP93XX_MAX_BRIGHT; + bl = backlight_device_register(dev->name, &dev->dev, ep93xxbl, + &ep93xxbl_ops, &props); + if (IS_ERR(bl)) + return PTR_ERR(bl); + + bl->props.brightness = EP93XX_DEF_BRIGHT; + + platform_set_drvdata(dev, bl); + + ep93xxbl_update_status(bl); + + return 0; +} + +static int ep93xxbl_remove(struct platform_device *dev) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + backlight_device_unregister(bl); + platform_set_drvdata(dev, NULL); + return 0; +} + +#ifdef CONFIG_PM +static int ep93xxbl_suspend(struct platform_device *dev, pm_message_t state) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + return ep93xxbl_set(bl, 0); +} + +static int ep93xxbl_resume(struct platform_device *dev) +{ + struct backlight_device *bl = platform_get_drvdata(dev); + + backlight_update_status(bl); + return 0; +} +#else +#define ep93xxbl_suspend NULL +#define ep93xxbl_resume NULL +#endif + +static struct platform_driver ep93xxbl_driver = { + .driver = { + .name = "ep93xx-bl", + .owner = THIS_MODULE, + }, + .probe = ep93xxbl_probe, + .remove = __devexit_p(ep93xxbl_remove), + .suspend = ep93xxbl_suspend, + .resume = ep93xxbl_resume, +}; + +static int __init ep93xxbl_init(void) +{ + return platform_driver_register(&ep93xxbl_driver); +} +module_init(ep93xxbl_init); + +static void __exit ep93xxbl_exit(void) +{ + platform_driver_unregister(&ep93xxbl_driver); +} +module_exit(ep93xxbl_exit); + +MODULE_DESCRIPTION("EP93xx Backlight Driver"); +MODULE_AUTHOR("H Hartley Sweeten "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ep93xx-bl"); diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 
bcdb12c93efd..9093ef0fa869 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -125,8 +125,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (priv == NULL) { dev_err(&spi->dev, "No memory for this device.\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } dev_set_drvdata(&spi->dev, priv); @@ -139,7 +138,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 reset gpio.\n"); - return ret; + goto err; } ret = gpio_direction_output(pdata->reset_gpio, 1); @@ -151,7 +150,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi) if (ret) { dev_err(&spi->dev, "Unable to get the lcd l4f00242t03 data en gpio.\n"); - return ret; + goto err2; } ret = gpio_direction_output(pdata->data_enable_gpio, 0); @@ -222,9 +221,9 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi) gpio_free(pdata->reset_gpio); if (priv->io_reg) - regulator_put(priv->core_reg); - if (priv->core_reg) regulator_put(priv->io_reg); + if (priv->core_reg) + regulator_put(priv->core_reg); kfree(priv); diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index b5accc957ad3..b2b2c7ba1f63 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -162,6 +162,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev) backlight_update_status(bl); return 0; out: + backlight_device_unregister(bl); kfree(data); return ret; } diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 1b5d3fe6bbbc..9fb533f6373e 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -141,7 +141,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { .callback = mbp_dmi_match, .ident = "MacBook 1,1", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), }, .driver_data = (void *)&intel_chipset_data, @@ -182,6 +182,42 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&intel_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 1,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 1,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,2"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 2,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 2,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"), + }, + .driver_data = (void *)&intel_chipset_data, + }, { .callback = mbp_dmi_match, .ident = "MacBookPro 3,1", @@ -236,6 +272,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&nvidia_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 6,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), + }, + .driver_data = (void 
*)&nvidia_chipset_data, + }, { .callback = mbp_dmi_match, .ident = "MacBookAir 2,1", diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c new file mode 100644 index 000000000000..3c424f7efdcc --- /dev/null +++ b/drivers/video/backlight/pcf50633-backlight.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen + * PCF50633 backlight device driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +struct pcf50633_bl { + struct pcf50633 *pcf; + struct backlight_device *bl; + + unsigned int brightness; + unsigned int brightness_limit; +}; + +/* + * pcf50633_bl_set_brightness_limit + * + * Update the brightness limit for the pc50633 backlight. The actual brightness + * will not go above the limit. This is useful to limit power drain for example + * on low battery. + * + * @dev: Pointer to a pcf50633 device + * @limit: The brightness limit. Valid values are 0-63 + */ +int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit) +{ + struct pcf50633_bl *pcf_bl = platform_get_drvdata(pcf->bl_pdev); + + if (!pcf_bl) + return -ENODEV; + + pcf_bl->brightness_limit = limit & 0x3f; + backlight_update_status(pcf_bl->bl); + + return 0; +} + +static int pcf50633_bl_update_status(struct backlight_device *bl) +{ + struct pcf50633_bl *pcf_bl = bl_get_data(bl); + unsigned int new_brightness; + + + if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK) || + bl->props.power != FB_BLANK_UNBLANK) + new_brightness = 0; + else if (bl->props.brightness < pcf_bl->brightness_limit) + new_brightness = bl->props.brightness; + else + new_brightness = pcf_bl->brightness_limit; + + + if (pcf_bl->brightness == new_brightness) + return 0; + + if (new_brightness) { + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDOUT, + new_brightness); + if (!pcf_bl->brightness) + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 1); + } else { + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDENA, 0); + } + + pcf_bl->brightness = new_brightness; + + return 0; +} + +static int pcf50633_bl_get_brightness(struct backlight_device *bl) +{ + struct pcf50633_bl *pcf_bl = bl_get_data(bl); + return pcf_bl->brightness; +} + +static const struct backlight_ops pcf50633_bl_ops = { + .get_brightness = pcf50633_bl_get_brightness, + .update_status = pcf50633_bl_update_status, + .options = BL_CORE_SUSPENDRESUME, +}; + +static int __devinit pcf50633_bl_probe(struct platform_device *pdev) +{ + int ret; + struct pcf50633_bl *pcf_bl; + struct device *parent = pdev->dev.parent; + struct pcf50633_platform_data *pcf50633_data = parent->platform_data; + struct pcf50633_bl_platform_data *pdata = pcf50633_data->backlight_data; + struct backlight_properties bl_props; + + pcf_bl = kzalloc(sizeof(*pcf_bl), GFP_KERNEL); + if (!pcf_bl) + return -ENOMEM; + + bl_props.max_brightness = 0x3f; + bl_props.power = FB_BLANK_UNBLANK; + + if (pdata) { + bl_props.brightness = pdata->default_brightness; + pcf_bl->brightness_limit = pdata->default_brightness_limit; + } else { + 
bl_props.brightness = 0x3f; + pcf_bl->brightness_limit = 0x3f; + } + + pcf_bl->pcf = dev_to_pcf50633(pdev->dev.parent); + + pcf_bl->bl = backlight_device_register(pdev->name, &pdev->dev, pcf_bl, + &pcf50633_bl_ops, &bl_props); + + if (IS_ERR(pcf_bl->bl)) { + ret = PTR_ERR(pcf_bl->bl); + goto err_free; + } + + platform_set_drvdata(pdev, pcf_bl); + + pcf50633_reg_write(pcf_bl->pcf, PCF50633_REG_LEDDIM, pdata->ramp_time); + + /* Should be different from bl_props.brightness, so we do not exit + * update_status early the first time it's called */ + pcf_bl->brightness = pcf_bl->bl->props.brightness + 1; + + backlight_update_status(pcf_bl->bl); + + return 0; + +err_free: + kfree(pcf_bl); + + return ret; +} + +static int __devexit pcf50633_bl_remove(struct platform_device *pdev) +{ + struct pcf50633_bl *pcf_bl = platform_get_drvdata(pdev); + + backlight_device_unregister(pcf_bl->bl); + + platform_set_drvdata(pdev, NULL); + + kfree(pcf_bl); + + return 0; +} + +static struct platform_driver pcf50633_bl_driver = { + .probe = pcf50633_bl_probe, + .remove = __devexit_p(pcf50633_bl_remove), + .driver = { + .name = "pcf50633-backlight", + }, +}; + +static int __init pcf50633_bl_init(void) +{ + return platform_driver_register(&pcf50633_bl_driver); +} +module_init(pcf50633_bl_init); + +static void __exit pcf50633_bl_exit(void) +{ + platform_driver_unregister(&pcf50633_bl_driver); +} +module_exit(pcf50633_bl_exit); + +MODULE_AUTHOR("Lars-Peter Clausen "); +MODULE_DESCRIPTION("PCF50633 backlight driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:pcf50633-backlight"); diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c new file mode 100644 index 000000000000..a3128c9cb7ad --- /dev/null +++ b/drivers/video/backlight/s6e63m0.c @@ -0,0 +1,920 @@ +/* + * S6E63M0 AMOLED LCD panel driver. + * + * Author: InKi Dae + * + * Derived from drivers/video/omap/lcd-apollon.c + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "s6e63m0_gamma.h" + +#define SLEEPMSEC 0x1000 +#define ENDDEF 0x2000 +#define DEFMASK 0xFF00 +#define COMMAND_ONLY 0xFE +#define DATA_ONLY 0xFF + +#define MIN_BRIGHTNESS 0 +#define MAX_BRIGHTNESS 10 + +#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) + +struct s6e63m0 { + struct device *dev; + struct spi_device *spi; + unsigned int power; + unsigned int current_brightness; + unsigned int gamma_mode; + unsigned int gamma_table_count; + struct lcd_device *ld; + struct backlight_device *bd; + struct lcd_platform_data *lcd_pd; +}; + +static const unsigned short SEQ_PANEL_CONDITION_SET[] = { + 0xF8, 0x01, + DATA_ONLY, 0x27, + DATA_ONLY, 0x27, + DATA_ONLY, 0x07, + DATA_ONLY, 0x07, + DATA_ONLY, 0x54, + DATA_ONLY, 0x9f, + DATA_ONLY, 0x63, + DATA_ONLY, 0x86, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x33, + DATA_ONLY, 0x0d, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_DISPLAY_CONDITION_SET[] = { + 0xf2, 0x02, + DATA_ONLY, 0x03, + DATA_ONLY, 0x1c, + DATA_ONLY, 0x10, + DATA_ONLY, 0x10, + + 0xf7, 0x03, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_GAMMA_SETTING[] = { + 0xfa, 0x00, + DATA_ONLY, 0x18, + DATA_ONLY, 0x08, + DATA_ONLY, 0x24, + DATA_ONLY, 0x64, + DATA_ONLY, 0x56, + DATA_ONLY, 0x33, + DATA_ONLY, 0xb6, + DATA_ONLY, 0xba, + DATA_ONLY, 0xa8, + DATA_ONLY, 0xac, + DATA_ONLY, 0xb1, + DATA_ONLY, 0x9d, + DATA_ONLY, 0xc1, + DATA_ONLY, 0xc1, + DATA_ONLY, 0xb7, + DATA_ONLY, 0x00, + DATA_ONLY, 0x9c, + DATA_ONLY, 0x00, + DATA_ONLY, 0x9f, + DATA_ONLY, 0x00, + DATA_ONLY, 0xd6, + + 0xfa, 0x01, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ETC_CONDITION_SET[] = { + 0xf6, 0x00, + DATA_ONLY, 0x8c, + DATA_ONLY, 0x07, + + 0xb3, 0xc, + + 0xb5, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xb6, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xb7, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xb8, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + 
DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xb9, 0x2c, + DATA_ONLY, 0x12, + DATA_ONLY, 0x0c, + DATA_ONLY, 0x0a, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0e, + DATA_ONLY, 0x17, + DATA_ONLY, 0x13, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x2a, + DATA_ONLY, 0x24, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x1b, + DATA_ONLY, 0x1a, + DATA_ONLY, 0x17, + + DATA_ONLY, 0x2b, + DATA_ONLY, 0x26, + DATA_ONLY, 0x22, + DATA_ONLY, 0x20, + DATA_ONLY, 0x3a, + DATA_ONLY, 0x34, + DATA_ONLY, 0x30, + DATA_ONLY, 0x2c, + DATA_ONLY, 0x29, + DATA_ONLY, 0x26, + DATA_ONLY, 0x25, + DATA_ONLY, 0x23, + DATA_ONLY, 0x21, + DATA_ONLY, 0x20, + DATA_ONLY, 0x1e, + DATA_ONLY, 0x1e, + + 0xba, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x11, + DATA_ONLY, 0x22, + DATA_ONLY, 0x33, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + DATA_ONLY, 0x44, + + DATA_ONLY, 0x55, + DATA_ONLY, 0x55, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + DATA_ONLY, 0x66, + + 0xc1, 0x4d, + DATA_ONLY, 0x96, + DATA_ONLY, 0x1d, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x01, + DATA_ONLY, 0xdf, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x03, + DATA_ONLY, 0x1f, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x00, + DATA_ONLY, 0x03, + DATA_ONLY, 0x06, + DATA_ONLY, 0x09, + DATA_ONLY, 0x0d, + DATA_ONLY, 0x0f, + DATA_ONLY, 0x12, + DATA_ONLY, 0x15, + DATA_ONLY, 0x18, + + 0xb2, 0x10, + DATA_ONLY, 0x10, + DATA_ONLY, 0x0b, + DATA_ONLY, 0x05, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ACL_ON[] = { + /* ACL on */ + 0xc0, 0x01, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ACL_OFF[] = { + /* ACL off */ + 0xc0, 0x00, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ELVSS_ON[] = { + /* ELVSS on */ + 0xb1, 0x0b, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_ELVSS_OFF[] = { + /* ELVSS off */ + 0xb1, 0x0a, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_STAND_BY_OFF[] = { + 0x11, COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_STAND_BY_ON[] = { + 0x10, COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + +static const unsigned short SEQ_DISPLAY_ON[] = { + 0x29, COMMAND_ONLY, + + ENDDEF, 0x0000 +}; + + +static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data) +{ + u16 buf[1]; + struct spi_message msg; + + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + }; + + buf[0] = (addr << 8) | data; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + return spi_sync(lcd->spi, &msg); +} + +static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address, + unsigned char command) +{ + int ret = 0; + + if (address != DATA_ONLY) + ret = s6e63m0_spi_write_byte(lcd, 0x0, address); + if (command != COMMAND_ONLY) + ret = s6e63m0_spi_write_byte(lcd, 0x1, command); + + return ret; +} + +static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd, + const unsigned short *wbuf) +{ + int ret = 0, i = 0; + + while ((wbuf[i] & DEFMASK) != ENDDEF) { + if ((wbuf[i] & DEFMASK) != SLEEPMSEC) { + ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]); + if (ret) + break; + } else + udelay(wbuf[i+1]*1000); + i += 2; + } + + return ret; +} + +static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma) +{ + unsigned int i = 0; + int ret = 0; + + /* disable gamma table updating. 
*/ + ret = s6e63m0_spi_write(lcd, 0xfa, 0x00); + if (ret) { + dev_err(lcd->dev, "failed to disable gamma table updating.\n"); + goto gamma_err; + } + + for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) { + ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]); + if (ret) { + dev_err(lcd->dev, "failed to set gamma table.\n"); + goto gamma_err; + } + } + + /* update gamma table. */ + ret = s6e63m0_spi_write(lcd, 0xfa, 0x01); + if (ret) + dev_err(lcd->dev, "failed to update gamma table.\n"); + +gamma_err: + return ret; +} + +static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma) +{ + int ret = 0; + + ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]); + + return ret; +} + + +static int s6e63m0_ldi_init(struct s6e63m0 *lcd) +{ + int ret, i; + const unsigned short *init_seq[] = { + SEQ_PANEL_CONDITION_SET, + SEQ_DISPLAY_CONDITION_SET, + SEQ_GAMMA_SETTING, + SEQ_ETC_CONDITION_SET, + SEQ_ACL_ON, + SEQ_ELVSS_ON, + }; + + for (i = 0; i < ARRAY_SIZE(init_seq); i++) { + ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]); + if (ret) + break; + } + + return ret; +} + +static int s6e63m0_ldi_enable(struct s6e63m0 *lcd) +{ + int ret = 0, i; + const unsigned short *enable_seq[] = { + SEQ_STAND_BY_OFF, + SEQ_DISPLAY_ON, + }; + + for (i = 0; i < ARRAY_SIZE(enable_seq); i++) { + ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]); + if (ret) + break; + } + + return ret; +} + +static int s6e63m0_ldi_disable(struct s6e63m0 *lcd) +{ + int ret; + + ret = s6e63m0_panel_send_sequence(lcd, SEQ_STAND_BY_ON); + + return ret; +} + +static int s6e63m0_power_on(struct s6e63m0 *lcd) +{ + int ret = 0; + struct lcd_platform_data *pd = NULL; + struct backlight_device *bd = NULL; + + pd = lcd->lcd_pd; + if (!pd) { + dev_err(lcd->dev, "platform data is NULL.\n"); + return -EFAULT; + } + + bd = lcd->bd; + if (!bd) { + dev_err(lcd->dev, "backlight device is NULL.\n"); + return -EFAULT; + } + + if (!pd->power_on) { + dev_err(lcd->dev, "power_on is NULL.\n"); + return -EFAULT; + } else { + pd->power_on(lcd->ld, 1); + mdelay(pd->power_on_delay); + } + + if (!pd->reset) { + dev_err(lcd->dev, "reset is NULL.\n"); + return -EFAULT; + } else { + pd->reset(lcd->ld); + mdelay(pd->reset_delay); + } + + ret = s6e63m0_ldi_init(lcd); + if (ret) { + dev_err(lcd->dev, "failed to initialize ldi.\n"); + return ret; + } + + ret = s6e63m0_ldi_enable(lcd); + if (ret) { + dev_err(lcd->dev, "failed to enable ldi.\n"); + return ret; + } + + /* set brightness to current value after power on or resume. 
*/ + ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness); + if (ret) { + dev_err(lcd->dev, "lcd gamma setting failed.\n"); + return ret; + } + + return 0; +} + +static int s6e63m0_power_off(struct s6e63m0 *lcd) +{ + int ret = 0; + struct lcd_platform_data *pd = NULL; + + pd = lcd->lcd_pd; + if (!pd) { + dev_err(lcd->dev, "platform data is NULL.\n"); + return -EFAULT; + } + + ret = s6e63m0_ldi_disable(lcd); + if (ret) { + dev_err(lcd->dev, "lcd setting failed.\n"); + return -EIO; + } + + mdelay(pd->power_off_delay); + + if (!pd->power_on) { + dev_err(lcd->dev, "power_on is NULL.\n"); + return -EFAULT; + } else + pd->power_on(lcd->ld, 0); + + return 0; +} + +static int s6e63m0_power(struct s6e63m0 *lcd, int power) +{ + int ret = 0; + + if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) + ret = s6e63m0_power_on(lcd); + else if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) + ret = s6e63m0_power_off(lcd); + + if (!ret) + lcd->power = power; + + return ret; +} + +static int s6e63m0_set_power(struct lcd_device *ld, int power) +{ + struct s6e63m0 *lcd = lcd_get_data(ld); + + if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN && + power != FB_BLANK_NORMAL) { + dev_err(lcd->dev, "power value should be 0, 1 or 4.\n"); + return -EINVAL; + } + + return s6e63m0_power(lcd, power); +} + +static int s6e63m0_get_power(struct lcd_device *ld) +{ + struct s6e63m0 *lcd = lcd_get_data(ld); + + return lcd->power; +} + +static int s6e63m0_get_brightness(struct backlight_device *bd) +{ + return bd->props.brightness; +} + +static int s6e63m0_set_brightness(struct backlight_device *bd) +{ + int ret = 0, brightness = bd->props.brightness; + struct s6e63m0 *lcd = bl_get_data(bd); + + if (brightness < MIN_BRIGHTNESS || + brightness > bd->props.max_brightness) { + dev_err(&bd->dev, "lcd brightness should be %d to %d.\n", + MIN_BRIGHTNESS, MAX_BRIGHTNESS); + return -EINVAL; + } + + ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness); + if (ret) { + dev_err(&bd->dev, "lcd brightness setting failed.\n"); + return -EIO; + } + + return ret; +} + +static struct lcd_ops s6e63m0_lcd_ops = { + .set_power = s6e63m0_set_power, + .get_power = s6e63m0_get_power, +}; + +static const struct backlight_ops s6e63m0_backlight_ops = { + .get_brightness = s6e63m0_get_brightness, + .update_status = s6e63m0_set_brightness, +}; + +static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + char temp[10]; + + switch (lcd->gamma_mode) { + case 0: + sprintf(temp, "2.2 mode\n"); + strcat(buf, temp); + break; + case 1: + sprintf(temp, "1.9 mode\n"); + strcat(buf, temp); + break; + case 2: + sprintf(temp, "1.7 mode\n"); + strcat(buf, temp); + break; + default: + dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7)n"); + break; + } + + return strlen(buf); +} + +static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + struct backlight_device *bd = NULL; + int brightness, rc; + + rc = strict_strtoul(buf, 0, (unsigned long *)&lcd->gamma_mode); + if (rc < 0) + return rc; + + bd = lcd->bd; + + brightness = bd->props.brightness; + + switch (lcd->gamma_mode) { + case 0: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]); + break; + case 1: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]); + break; + case 2: + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]); + break; + 
default: + dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n"); + _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]); + break; + } + return len; +} + +static DEVICE_ATTR(gamma_mode, 0644, + s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode); + +static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct s6e63m0 *lcd = dev_get_drvdata(dev); + char temp[3]; + + sprintf(temp, "%d\n", lcd->gamma_table_count); + strcpy(buf, temp); + + return strlen(buf); +} +static DEVICE_ATTR(gamma_table, 0644, + s6e63m0_sysfs_show_gamma_table, NULL); + +static int __init s6e63m0_probe(struct spi_device *spi) +{ + int ret = 0; + struct s6e63m0 *lcd = NULL; + struct lcd_device *ld = NULL; + struct backlight_device *bd = NULL; + + lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL); + if (!lcd) + return -ENOMEM; + + /* s6e63m0 lcd panel uses 3-wire 9bits SPI Mode. */ + spi->bits_per_word = 9; + + ret = spi_setup(spi); + if (ret < 0) { + dev_err(&spi->dev, "spi setup failed.\n"); + goto out_free_lcd; + } + + lcd->spi = spi; + lcd->dev = &spi->dev; + + lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data; + if (!lcd->lcd_pd) { + dev_err(&spi->dev, "platform data is NULL.\n"); + goto out_free_lcd; + } + + ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops); + if (IS_ERR(ld)) { + ret = PTR_ERR(ld); + goto out_free_lcd; + } + + lcd->ld = ld; + + bd = backlight_device_register("s6e63m0bl-bl", &spi->dev, lcd, + &s6e63m0_backlight_ops, NULL); + if (IS_ERR(bd)) { + ret = PTR_ERR(bd); + goto out_lcd_unregister; + } + + bd->props.max_brightness = MAX_BRIGHTNESS; + bd->props.brightness = MAX_BRIGHTNESS; + lcd->bd = bd; + + /* + * it gets gamma table count available so it gets user + * know that. + */ + lcd->gamma_table_count = + sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int)); + + ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode); + if (ret < 0) + dev_err(&(spi->dev), "failed to add sysfs entries\n"); + + ret = device_create_file(&(spi->dev), &dev_attr_gamma_table); + if (ret < 0) + dev_err(&(spi->dev), "failed to add sysfs entries\n"); + + /* + * if lcd panel was on from bootloader like u-boot then + * do not lcd on. + */ + if (!lcd->lcd_pd->lcd_enabled) { + /* + * if lcd panel was off from bootloader then + * current lcd status is powerdown and then + * it enables lcd panel. + */ + lcd->power = FB_BLANK_POWERDOWN; + + s6e63m0_power(lcd, FB_BLANK_UNBLANK); + } else + lcd->power = FB_BLANK_UNBLANK; + + dev_set_drvdata(&spi->dev, lcd); + + dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n"); + + return 0; + +out_lcd_unregister: + lcd_device_unregister(ld); +out_free_lcd: + kfree(lcd); + return ret; +} + +static int __devexit s6e63m0_remove(struct spi_device *spi) +{ + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + s6e63m0_power(lcd, FB_BLANK_POWERDOWN); + lcd_device_unregister(lcd->ld); + kfree(lcd); + + return 0; +} + +#if defined(CONFIG_PM) +unsigned int before_power; + +static int s6e63m0_suspend(struct spi_device *spi, pm_message_t mesg) +{ + int ret = 0; + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + dev_dbg(&spi->dev, "lcd->power = %d\n", lcd->power); + + before_power = lcd->power; + + /* + * when lcd panel is suspend, lcd panel becomes off + * regardless of status. 
+ */ + ret = s6e63m0_power(lcd, FB_BLANK_POWERDOWN); + + return ret; +} + +static int s6e63m0_resume(struct spi_device *spi) +{ + int ret = 0; + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + /* + * after suspended, if lcd panel status is FB_BLANK_UNBLANK + * (at that time, before_power is FB_BLANK_UNBLANK) then + * it changes that status to FB_BLANK_POWERDOWN to get lcd on. + */ + if (before_power == FB_BLANK_UNBLANK) + lcd->power = FB_BLANK_POWERDOWN; + + dev_dbg(&spi->dev, "before_power = %d\n", before_power); + + ret = s6e63m0_power(lcd, before_power); + + return ret; +} +#else +#define s6e63m0_suspend NULL +#define s6e63m0_resume NULL +#endif + +/* Power down all displays on reboot, poweroff or halt. */ +static void s6e63m0_shutdown(struct spi_device *spi) +{ + struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); + + s6e63m0_power(lcd, FB_BLANK_POWERDOWN); +} + +static struct spi_driver s6e63m0_driver = { + .driver = { + .name = "s6e63m0", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = s6e63m0_probe, + .remove = __devexit_p(s6e63m0_remove), + .shutdown = s6e63m0_shutdown, + .suspend = s6e63m0_suspend, + .resume = s6e63m0_resume, +}; + +static int __init s6e63m0_init(void) +{ + return spi_register_driver(&s6e63m0_driver); +} + +static void __exit s6e63m0_exit(void) +{ + spi_unregister_driver(&s6e63m0_driver); +} + +module_init(s6e63m0_init); +module_exit(s6e63m0_exit); + +MODULE_AUTHOR("InKi Dae "); +MODULE_DESCRIPTION("S6E63M0 LCD Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h new file mode 100644 index 000000000000..2c44bdb0696b --- /dev/null +++ b/drivers/video/backlight/s6e63m0_gamma.h @@ -0,0 +1,266 @@ +/* linux/drivers/video/samsung/s6e63m0_brightness.h + * + * Gamma level definitions. + * + * Copyright (c) 2009 Samsung Electronics + * InKi Dae + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+*/ + +#ifndef _S6E63M0_BRIGHTNESS_H +#define _S6E63M0_BRIGHTNESS_H + +#define MAX_GAMMA_LEVEL 11 +#define GAMMA_TABLE_COUNT 21 + +/* gamma value: 2.2 */ +static const unsigned int s6e63m0_22_300[] = { + 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6, + 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0, + 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb +}; + +static const unsigned int s6e63m0_22_280[] = { + 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6, + 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1, + 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 +}; + +static const unsigned int s6e63m0_22_260[] = { + 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6, + 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2, + 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 + +}; + +static const unsigned int s6e63m0_22_240[] = { + 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9, + 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3, + 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA + +}; +static const unsigned int s6e63m0_22_220[] = { + 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8, + 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4, + 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 +}; + +static const unsigned int s6e63m0_22_200[] = { + 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA, + 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6, + 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA +}; + +static const unsigned int s6e63m0_22_170[] = { + 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB, + 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8, + 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB +}; + +static const unsigned int s6e63m0_22_140[] = { + 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC, + 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9, + 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E +}; + +static const unsigned int s6e63m0_22_110[] = { + 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF, + 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC, + 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_22_90[] = { + 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0, + 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF, + 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 +}; + +static const unsigned int s6e63m0_22_30[] = { + 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8, + 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7, + 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 +}; + +/* gamma value: 1.9 */ +static const unsigned int s6e63m0_19_300[] = { + 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA, + 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5, + 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB +}; + +static const unsigned int s6e63m0_19_280[] = { + 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB, + 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7, + 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5 +}; + +static const unsigned int s6e63m0_19_260[] = { + 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA, + 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8, + 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF +}; + +static const unsigned int s6e63m0_19_240[] = { + 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB, + 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9, + 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8 +}; + +static const unsigned int s6e63m0_19_220[] = { + 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC, + 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA, + 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0 +}; + +static const unsigned int s6e63m0_19_200[] = { + 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE, + 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB, + 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8 +}; + +static const unsigned int s6e63m0_19_170[] = { + 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF, + 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD, + 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA +}; + +static const unsigned int s6e63m0_19_140[] = 
{ + 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0, + 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0, + 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C +}; + +static const unsigned int s6e63m0_19_110[] = { + 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2, + 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1, + 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_19_90[] = { + 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3, + 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3, + 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81 +}; + +static const unsigned int s6e63m0_19_30[] = { + 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA, + 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA, + 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E +}; + +/* gamma value: 1.7 */ +static const unsigned int s6e63m0_17_300[] = { + 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF, + 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD, + 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB +}; + +static const unsigned int s6e63m0_17_280[] = { + 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF, + 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD, + 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 +}; + +static const unsigned int s6e63m0_17_260[] = { + 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0, + 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF, + 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 +}; + +static const unsigned int s6e63m0_17_240[] = { + 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2, + 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF, + 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA +}; + +static const unsigned int s6e63m0_17_220[] = { + 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2, + 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1, + 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 +}; + +static const unsigned int s6e63m0_17_200[] = { + 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2, + 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1, + 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA +}; + +static const unsigned int s6e63m0_17_170[] = { + 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3, + 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3, + 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB +}; + +static const unsigned int s6e63m0_17_140[] = { + 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3, + 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4, + 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E +}; + +static const unsigned int s6e63m0_17_110[] = { + 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6, + 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8, + 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D +}; + +static const unsigned int s6e63m0_17_90[] = { + 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8, + 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8, + 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 +}; + +static const unsigned int s6e63m0_17_30[] = { + 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1, + 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0, + 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 +}; + +struct s6e63m0_gamma { + unsigned int *gamma_22_table[MAX_GAMMA_LEVEL]; + unsigned int *gamma_19_table[MAX_GAMMA_LEVEL]; + unsigned int *gamma_17_table[MAX_GAMMA_LEVEL]; +}; + +static struct s6e63m0_gamma gamma_table = { + .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30, + .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90, + .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110, + .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140, + .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170, + .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200, + .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220, + .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240, + .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260, + .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280, + .gamma_22_table[10] = 
(unsigned int *)&s6e63m0_22_300, + + .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30, + .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90, + .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110, + .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140, + .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170, + .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200, + .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220, + .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240, + .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260, + .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280, + .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300, + + .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30, + .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90, + .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110, + .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140, + .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170, + .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200, + .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220, + .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240, + .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260, + .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280, + .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300, +}; + +#endif + diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 23b2a8c0dbfc..b020ba7f1cf2 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c @@ -501,7 +501,9 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id) static int __devinit bfin_bf54x_probe(struct platform_device *pdev) { +#ifndef NO_BL_SUPPORT struct backlight_properties props; +#endif struct bfin_bf54xfb_info *info; struct fb_info *fbinfo; int ret; @@ -654,7 +656,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) printk(KERN_ERR DRIVER_NAME ": unable to register backlight.\n"); ret = -EINVAL; - goto out9; + unregister_framebuffer(fbinfo); + goto out8; } lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); @@ -663,8 +666,6 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) return 0; -out9: - unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index c2ec3dcd4e91..7a50272eaab9 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c @@ -420,7 +420,9 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id) static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) { +#ifndef NO_BL_SUPPORT struct backlight_properties props; +#endif struct bfin_t350mcqbfb_info *info; struct fb_info *fbinfo; int ret; @@ -550,7 +552,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) printk(KERN_ERR DRIVER_NAME ": unable to register backlight.\n"); ret = -EINVAL; - goto out9; + unregister_framebuffer(fbinfo); + goto out8; } lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); @@ -559,8 +562,6 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) return 0; -out9: - unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 1105a591dcc1..073c9b408cf7 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -66,7 +66,7 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, return 0; } -int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync) 
+int fb_deferred_io_fsync(struct file *file, int datasync) { struct fb_info *info = file->private_data; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index d4471b4c0374..dce8c97b4333 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -71,7 +71,8 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", "S3 Trio64UV+", "S3 Trio64V2/DX", "S3 Trio64V2/GX", "S3 Plato/PX", "S3 Aurora64VP", "S3 Virge", "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", - "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P"}; + "S3 Virge/GX2", "S3 Virge/GX2P", "S3 Virge/GX2P", + "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X"}; #define CHIP_UNKNOWN 0x00 #define CHIP_732_TRIO32 0x01 @@ -89,10 +90,14 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_356_VIRGE_GX2 0x0D #define CHIP_357_VIRGE_GX2P 0x0E #define CHIP_359_VIRGE_GX2P 0x0F +#define CHIP_360_TRIO3D_1X 0x10 +#define CHIP_362_TRIO3D_2X 0x11 +#define CHIP_368_TRIO3D_2X 0x12 #define CHIP_XXX_TRIO 0x80 #define CHIP_XXX_TRIO64V2_DXGX 0x81 #define CHIP_XXX_VIRGE_DXGX 0x82 +#define CHIP_36X_TRIO3D_1X_2X 0x83 #define CHIP_UNDECIDED_FLAG 0x80 #define CHIP_MASK 0xFF @@ -324,6 +329,7 @@ static void s3fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) static void s3_set_pixclock(struct fb_info *info, u32 pixclock) { + struct s3fb_info *par = info->par; u16 m, n, r; u8 regval; int rv; @@ -339,7 +345,13 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock) vga_w(NULL, VGA_MIS_W, regval | VGA_MIS_ENB_PLL_LOAD); /* Set S3 clock registers */ - vga_wseq(NULL, 0x12, ((n - 2) | (r << 5))); + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + vga_wseq(NULL, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */ + vga_wseq(NULL, 0x29, r >> 2); /* remaining highest bit of r */ + } else + vga_wseq(NULL, 0x12, (n - 2) | (r << 5)); vga_wseq(NULL, 0x13, m - 2); udelay(1000); @@ -456,7 +468,7 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) static int s3fb_set_par(struct fb_info *info) { struct s3fb_info *par = info->par; - u32 value, mode, hmul, offset_value, screen_size, multiplex; + u32 value, mode, hmul, offset_value, screen_size, multiplex, dbytes; u32 bpp = info->var.bits_per_pixel; if (bpp != 0) { @@ -518,7 +530,7 @@ static int s3fb_set_par(struct fb_info *info) svga_wcrt_mask(0x33, 0x00, 0x08); /* no DDR ? */ svga_wcrt_mask(0x43, 0x00, 0x01); /* no DDR ? 
*/ - svga_wcrt_mask(0x5D, 0x00, 0x28); // Clear strange HSlen bits + svga_wcrt_mask(0x5D, 0x00, 0x28); /* Clear strange HSlen bits */ /* svga_wcrt_mask(0x58, 0x03, 0x03); */ @@ -530,10 +542,14 @@ static int s3fb_set_par(struct fb_info *info) pr_debug("fb%d: offset register : %d\n", info->node, offset_value); svga_wcrt_multi(s3_offset_regs, offset_value); - vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ - vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ - vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ - vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) { + vga_wcrt(NULL, 0x54, 0x18); /* M parameter */ + vga_wcrt(NULL, 0x60, 0xff); /* N parameter */ + vga_wcrt(NULL, 0x61, 0xff); /* L parameter */ + vga_wcrt(NULL, 0x62, 0xff); /* L parameter */ + } vga_wcrt(NULL, 0x3A, 0x35); svga_wattr(0x33, 0x00); @@ -570,6 +586,16 @@ static int s3fb_set_par(struct fb_info *info) vga_wcrt(NULL, 0x66, 0x90); } + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + dbytes = info->var.xres * ((bpp+7)/8); + vga_wcrt(NULL, 0x91, (dbytes + 7) / 8); + vga_wcrt(NULL, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80); + + vga_wcrt(NULL, 0x66, 0x81); + } + svga_wcrt_mask(0x31, 0x00, 0x40); multiplex = 0; hmul = 1; @@ -615,11 +641,13 @@ static int s3fb_set_par(struct fb_info *info) break; case 3: pr_debug("fb%d: 8 bit pseudocolor\n", info->node); - if (info->var.pixclock > 20000) { - svga_wcrt_mask(0x50, 0x00, 0x30); + svga_wcrt_mask(0x50, 0x00, 0x30); + if (info->var.pixclock > 20000 || + par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) svga_wcrt_mask(0x67, 0x00, 0xF0); - } else { - svga_wcrt_mask(0x50, 0x00, 0x30); + else { svga_wcrt_mask(0x67, 0x10, 0xF0); multiplex = 1; } @@ -634,7 +662,10 @@ static int s3fb_set_par(struct fb_info *info) } else { svga_wcrt_mask(0x50, 0x10, 0x30); svga_wcrt_mask(0x67, 0x30, 0xF0); - hmul = 2; + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) + hmul = 2; } break; case 5: @@ -647,7 +678,10 @@ static int s3fb_set_par(struct fb_info *info) } else { svga_wcrt_mask(0x50, 0x10, 0x30); svga_wcrt_mask(0x67, 0x50, 0xF0); - hmul = 2; + if (par->chip != CHIP_360_TRIO3D_1X && + par->chip != CHIP_362_TRIO3D_2X && + par->chip != CHIP_368_TRIO3D_2X) + hmul = 2; } break; case 6: @@ -866,6 +900,17 @@ static int __devinit s3_identification(int chip) return CHIP_385_VIRGE_GX; } + if (chip == CHIP_36X_TRIO3D_1X_2X) { + switch (vga_rcrt(NULL, 0x2f)) { + case 0x00: + return CHIP_360_TRIO3D_1X; + case 0x01: + return CHIP_362_TRIO3D_2X; + case 0x02: + return CHIP_368_TRIO3D_2X; + } + } + return CHIP_UNKNOWN; } @@ -930,17 +975,32 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i vga_wcrt(NULL, 0x38, 0x48); vga_wcrt(NULL, 0x39, 0xA5); - /* Find how many physical memory there is on card */ - /* 0x36 register is accessible even if other registers are locked */ - regval = vga_rcrt(NULL, 0x36); - info->screen_size = s3_memsizes[regval >> 5] << 10; - info->fix.smem_len = info->screen_size; - + /* Identify chip type */ par->chip = id->driver_data & CHIP_MASK; par->rev = vga_rcrt(NULL, 0x2f); if (par->chip & CHIP_UNDECIDED_FLAG) par->chip = s3_identification(par->chip); + /* Find how many physical memory there is on card */ + /* 0x36 register is accessible even if other registers are locked */ + regval = 
vga_rcrt(NULL, 0x36); + if (par->chip == CHIP_360_TRIO3D_1X || + par->chip == CHIP_362_TRIO3D_2X || + par->chip == CHIP_368_TRIO3D_2X) { + switch ((regval & 0xE0) >> 5) { + case 0: /* 8MB -- only 4MB usable for display */ + case 1: /* 4MB with 32-bit bus */ + case 2: /* 4MB */ + info->screen_size = 4 << 20; + break; + case 6: /* 2MB */ + info->screen_size = 2 << 20; + break; + } + } else + info->screen_size = s3_memsizes[regval >> 5] << 10; + info->fix.smem_len = info->screen_size; + /* Find MCLK frequency */ regval = vga_rseq(NULL, 0x10); par->mclk_freq = ((vga_rseq(NULL, 0x11) + 2) * 14318) / ((regval & 0x1F) + 2); @@ -1131,6 +1191,7 @@ static struct pci_device_id s3_devices[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A10), .driver_data = CHIP_356_VIRGE_GX2}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A11), .driver_data = CHIP_357_VIRGE_GX2P}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, + {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c index 2bc40e682f95..1082541358f0 100644 --- a/drivers/video/via/viafbdev.c +++ b/drivers/video/via/viafbdev.c @@ -578,14 +578,9 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg) break; case VIAFB_SET_GAMMA_LUT: - viafb_gamma_table = kmalloc(256 * sizeof(u32), GFP_KERNEL); - if (!viafb_gamma_table) - return -ENOMEM; - if (copy_from_user(viafb_gamma_table, argp, - 256 * sizeof(u32))) { - kfree(viafb_gamma_table); - return -EFAULT; - } + viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32)); + if (IS_ERR(viafb_gamma_table)) + return PTR_ERR(viafb_gamma_table); viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); kfree(viafb_gamma_table); break; diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 69c6adbd8205..428f8a1583e8 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -1,7 +1,7 @@ /* * RDC321x watchdog driver * - * Copyright (C) 2007 Florian Fainelli + * Copyright (C) 2007-2010 Florian Fainelli * * This driver is highly inspired from the cpu5_wdt driver * @@ -36,8 +36,7 @@ #include #include #include - -#include +#include #define RDC_WDT_MASK 0x80000000 /* Mask */ #define RDC_WDT_EN 0x00800000 /* Enable bit */ @@ -63,6 +62,8 @@ static struct { int default_ticks; unsigned long inuse; spinlock_t lock; + struct pci_dev *sb_pdev; + int base_reg; } rdc321x_wdt_device; /* generic helper functions */ @@ -70,14 +71,18 @@ static struct { static void rdc321x_wdt_trigger(unsigned long unused) { unsigned long flags; + u32 val; if (rdc321x_wdt_device.running) ticks--; /* keep watchdog alive */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - outl(RDC_WDT_EN | inl(RDC3210_CFGREG_DATA), - RDC3210_CFGREG_DATA); + pci_read_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, &val); + val |= RDC_WDT_EN; + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, val); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); /* requeue?? 
*/ @@ -105,10 +110,13 @@ static void rdc321x_wdt_start(void) /* Clear the timer */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - outl(RDC_CLS_TMR, RDC3210_CFGREG_ADDR); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, RDC_CLS_TMR); /* Enable watchdog and set the timeout to 81.92 us */ - outl(RDC_WDT_EN | RDC_WDT_CNT, RDC3210_CFGREG_DATA); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, + RDC_WDT_EN | RDC_WDT_CNT); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); mod_timer(&rdc321x_wdt_device.timer, @@ -148,7 +156,7 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; - unsigned int value; + u32 value; static const struct watchdog_info ident = { .options = WDIOF_CARDRESET, .identity = "RDC321x WDT", @@ -162,9 +170,10 @@ static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_GETSTATUS: /* Read the value from the DATA register */ spin_lock_irqsave(&rdc321x_wdt_device.lock, flags); - value = inl(RDC3210_CFGREG_DATA); + pci_read_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, &value); spin_unlock_irqrestore(&rdc321x_wdt_device.lock, flags); - if (copy_to_user(argp, &value, sizeof(int))) + if (copy_to_user(argp, &value, sizeof(u32))) return -EFAULT; break; case WDIOC_GETSUPPORT: @@ -219,17 +228,35 @@ static struct miscdevice rdc321x_wdt_misc = { static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) { int err; + struct resource *r; + struct rdc321x_wdt_pdata *pdata; + + pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "no platform data supplied\n"); + return -ENODEV; + } + + r = platform_get_resource_byname(pdev, IORESOURCE_IO, "wdt-reg"); + if (!r) { + dev_err(&pdev->dev, "failed to get wdt-reg resource\n"); + return -ENODEV; + } + + rdc321x_wdt_device.sb_pdev = pdata->sb_pdev; + rdc321x_wdt_device.base_reg = r->start; err = misc_register(&rdc321x_wdt_misc); if (err < 0) { - printk(KERN_ERR PFX "watchdog misc_register failed\n"); + dev_err(&pdev->dev, "misc_register failed\n"); return err; } spin_lock_init(&rdc321x_wdt_device.lock); /* Reset the watchdog */ - outl(RDC_WDT_RST, RDC3210_CFGREG_DATA); + pci_write_config_dword(rdc321x_wdt_device.sb_pdev, + rdc321x_wdt_device.base_reg, RDC_WDT_RST); init_completion(&rdc321x_wdt_device.stop); rdc321x_wdt_device.queue = 0; @@ -240,7 +267,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) rdc321x_wdt_device.default_ticks = ticks; - printk(KERN_INFO PFX "watchdog init success\n"); + dev_info(&pdev->dev, "watchdog init success\n"); return 0; } diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 25b300e1c9d7..2bedc6c94fc2 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -257,15 +257,13 @@ v9fs_file_write(struct file *filp, const char __user * data, return total; } -static int v9fs_file_fsync(struct file *filp, struct dentry *dentry, - int datasync) +static int v9fs_file_fsync(struct file *filp, int datasync) { struct p9_fid *fid; struct p9_wstat wstat; int retval; - P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp, - dentry, datasync); + P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); fid = filp->private_data; v9fs_blank_wstat(&wstat); diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index 23aa52f548a0..f4287e4de744 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -197,7 +197,7 @@ const struct file_operations adfs_dir_operations = { .read = 
generic_read_dir, .llseek = generic_file_llseek, .readdir = adfs_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, }; static int diff --git a/fs/adfs/file.c b/fs/adfs/file.c index 005ea34d1758..a36da5382b40 100644 --- a/fs/adfs/file.c +++ b/fs/adfs/file.c @@ -26,7 +26,7 @@ const struct file_operations adfs_file_operations = { .read = do_sync_read, .aio_read = generic_file_aio_read, .mmap = generic_file_mmap, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .write = do_sync_write, .aio_write = generic_file_aio_write, .splice_read = generic_file_splice_read, diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 0f5e30978135..6f850b06ab62 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -322,8 +322,9 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr) if (error) goto out; + /* XXX: this is missing some actual on-disk truncation.. */ if (ia_valid & ATTR_SIZE) - error = vmtruncate(inode, attr->ia_size); + error = simple_setsize(inode, attr->ia_size); if (error) goto out; diff --git a/fs/affs/affs.h b/fs/affs/affs.h index 861dae68ac12..f05b6155ccc8 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -183,7 +183,7 @@ extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dent void affs_free_prealloc(struct inode *inode); extern void affs_truncate(struct inode *); -int affs_file_fsync(struct file *, struct dentry *, int); +int affs_file_fsync(struct file *, int); /* dir.c */ diff --git a/fs/affs/file.c b/fs/affs/file.c index 184e55c1c9ba..322710c3eedf 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -916,9 +916,9 @@ affs_truncate(struct inode *inode) affs_free_prealloc(inode); } -int affs_file_fsync(struct file *filp, struct dentry *dentry, int datasync) +int affs_file_fsync(struct file *filp, int datasync) { - struct inode * inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; int ret, err; ret = write_inode_now(inode, 0); diff --git a/fs/affs/namei.c b/fs/affs/namei.c index d70bbbac6b7b..914d1c0bc07a 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -224,7 +224,7 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) affs_brelse(bh); inode = affs_iget(sb, ino); if (IS_ERR(inode)) - return ERR_PTR(PTR_ERR(inode)); + return ERR_CAST(inode); } dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? 
&affs_intl_dentry_operations : &affs_dentry_operations; d_add(dentry, inode); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 807f284cc75e..5f679b77ce24 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -740,7 +740,7 @@ extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); extern ssize_t afs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); extern int afs_writeback_all(struct afs_vnode *); -extern int afs_fsync(struct file *, struct dentry *, int); +extern int afs_fsync(struct file *, int); /*****************************************************************************/ diff --git a/fs/afs/server.c b/fs/afs/server.c index f49099516675..9fdc7fe3a7bc 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -91,9 +91,10 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell, memcpy(&server->addr, addr, sizeof(struct in_addr)); server->addr.s_addr = addr->s_addr; + _leave(" = %p{%d}", server, atomic_read(&server->usage)); + } else { + _leave(" = NULL [nomem]"); } - - _leave(" = %p{%d}", server, atomic_read(&server->usage)); return server; } diff --git a/fs/afs/write.c b/fs/afs/write.c index 3bed54a294d4..3dab9e9948d0 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -701,8 +701,9 @@ int afs_writeback_all(struct afs_vnode *vnode) * - the return status from this call provides a reliable indication of * whether any write errors occurred for this process. */ -int afs_fsync(struct file *file, struct dentry *dentry, int datasync) +int afs_fsync(struct file *file, int datasync) { + struct dentry *dentry = file->f_path.dentry; struct afs_writeback *wb, *xwb; struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); int ret; diff --git a/fs/aio.c b/fs/aio.c index 1cf12b3dd83a..1ccf25cef1f0 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -526,7 +527,7 @@ static void aio_fput_routine(struct work_struct *data) /* Complete the fput(s) */ if (req->ki_filp != NULL) - __fput(req->ki_filp); + fput(req->ki_filp); /* Link the iocb into the context's free list */ spin_lock_irq(&ctx->ctx_lock); @@ -559,11 +560,11 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) /* * Try to optimize the aio and eventfd file* puts, by avoiding to - * schedule work in case it is not __fput() time. In normal cases, + * schedule work in case it is not final fput() time. In normal cases, * we would not be holding the last reference to the file*, so * this function will be executed w/out any aio kthread wakeup. 
*/ - if (unlikely(atomic_long_dec_and_test(&req->ki_filp->f_count))) { + if (unlikely(!fput_atomic(req->ki_filp))) { get_ioctx(ctx); spin_lock(&fput_lock); list_add(&req->ki_list, &fput_head); @@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb) return ret; } -static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb) +static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) { ssize_t ret; - ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf, - kiocb->ki_nbytes, 1, - &kiocb->ki_inline_vec, &kiocb->ki_iovec); +#ifdef CONFIG_COMPAT + if (compat) + ret = compat_rw_copy_check_uvector(type, + (struct compat_iovec __user *)kiocb->ki_buf, + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, + &kiocb->ki_iovec); + else +#endif + ret = rw_copy_check_uvector(type, + (struct iovec __user *)kiocb->ki_buf, + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, + &kiocb->ki_iovec); if (ret < 0) goto out; @@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb) * Performs the initial checks and aio retry method * setup for the kiocb at the time of io submission. */ -static ssize_t aio_setup_iocb(struct kiocb *kiocb) +static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) { struct file *file = kiocb->ki_filp; ssize_t ret = 0; @@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) ret = security_file_permission(file, MAY_READ); if (unlikely(ret)) break; - ret = aio_setup_vectored_rw(READ, kiocb); + ret = aio_setup_vectored_rw(READ, kiocb, compat); if (ret) break; ret = -EINVAL; @@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) ret = security_file_permission(file, MAY_WRITE); if (unlikely(ret)) break; - ret = aio_setup_vectored_rw(WRITE, kiocb); + ret = aio_setup_vectored_rw(WRITE, kiocb, compat); if (ret) break; ret = -EINVAL; @@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash) } static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, - struct iocb *iocb, struct hlist_head *batch_hash) + struct iocb *iocb, struct hlist_head *batch_hash, + bool compat) { struct kiocb *req; struct file *file; @@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, req->ki_left = req->ki_nbytes = iocb->aio_nbytes; req->ki_opcode = iocb->aio_lio_opcode; - ret = aio_setup_iocb(req); + ret = aio_setup_iocb(req, compat); if (ret) goto out_put_req; @@ -1637,20 +1648,8 @@ out_put_req: return ret; } -/* sys_io_submit: - * Queue the nr iocbs pointed to by iocbpp for processing. Returns - * the number of iocbs queued. May return -EINVAL if the aio_context - * specified by ctx_id is invalid, if nr is < 0, if the iocb at - * *iocbpp[0] is not properly initialized, if the operation specified - * is invalid for the file descriptor in the iocb. May fail with - * -EFAULT if any of the data structures point to invalid data. May - * fail with -EBADF if the file descriptor specified in the first - * iocb is invalid. May fail with -EAGAIN if insufficient resources - * are available to queue any iocbs. Will return 0 if nr is 0. Will - * fail with -ENOSYS if not implemented. 
- */ -SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, - struct iocb __user * __user *, iocbpp) +long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user *__user *iocbpp, bool compat) { struct kioctx *ctx; long ret = 0; @@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, break; } - ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash); + ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat); if (ret) break; } @@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, return i ? i : ret; } +/* sys_io_submit: + * Queue the nr iocbs pointed to by iocbpp for processing. Returns + * the number of iocbs queued. May return -EINVAL if the aio_context + * specified by ctx_id is invalid, if nr is < 0, if the iocb at + * *iocbpp[0] is not properly initialized, if the operation specified + * is invalid for the file descriptor in the iocb. May fail with + * -EFAULT if any of the data structures point to invalid data. May + * fail with -EBADF if the file descriptor specified in the first + * iocb is invalid. May fail with -EAGAIN if insufficient resources + * are available to queue any iocbs. Will return 0 if nr is 0. Will + * fail with -ENOSYS if not implemented. + */ +SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, + struct iocb __user * __user *, iocbpp) +{ + return do_io_submit(ctx_id, nr, iocbpp, 0); +} + /* lookup_kiocb * Finds a given iocb for cancellation. */ diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 9bd4b3876c99..e4b75d6eda83 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -205,7 +205,7 @@ static struct inode *anon_inode_mkinode(void) * that it already _is_ on the dirty list. */ inode->i_state = I_DIRTY; - inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; + inode->i_mode = S_IRUSR | S_IWUSR; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_flags |= S_PRIVATE; diff --git a/fs/attr.c b/fs/attr.c index 0815e93bb487..b4fa3b0aa596 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -67,14 +67,14 @@ EXPORT_SYMBOL(inode_change_ok); * @offset: the new size to assign to the inode * @Returns: 0 on success, -ve errno on failure * + * inode_newsize_ok must be called with i_mutex held. + * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ * when necessary. Caller must not proceed with inode size change if failure is * returned. @inode must be a file (not directory), with appropriate * permissions to allow truncate (inode_newsize_ok does NOT check these * conditions). - * - * inode_newsize_ok must be called with i_mutex held. */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { @@ -104,17 +104,25 @@ out_big: } EXPORT_SYMBOL(inode_newsize_ok); -int inode_setattr(struct inode * inode, struct iattr * attr) +/** + * generic_setattr - copy simple metadata updates into the generic inode + * @inode: the inode to be updated + * @attr: the new attributes + * + * generic_setattr must be called with i_mutex held. + * + * generic_setattr updates the inode's metadata with that specified + * in attr. Noticably missing is inode size update, which is more complex + * as it requires pagecache updates. See simple_setsize. + * + * The inode is not marked as dirty after this operation. The rationale is + * that for "simple" filesystems, the struct inode is the inode storage. + * The caller is free to mark the inode dirty afterwards if needed. 
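
The attr.c changes in this hunk add generic_setattr() and mark inode_setattr() as deprecated in favour of the new truncate sequence (simple_setsize() followed by generic_setattr(), as the adfs hunk earlier in this diff also shows). As a rough, hypothetical sketch of that sequence in a simple filesystem's ->setattr (examplefs_setattr is invented for illustration and is not part of this patch set):

    static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
    {
            struct inode *inode = dentry->d_inode;
            int error;

            error = inode_change_ok(inode, attr);
            if (error)
                    return error;

            if ((attr->ia_valid & ATTR_SIZE) &&
                attr->ia_size != i_size_read(inode)) {
                    /* new sequence: update pagecache and i_size first */
                    error = simple_setsize(inode, attr->ia_size);
                    if (error)
                            return error;
            }

            /* copy uid/gid/mode/times; does not mark the inode dirty */
            generic_setattr(inode, attr);
            mark_inode_dirty(inode);
            return 0;
    }
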
+ */ +void generic_setattr(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; - if (ia_valid & ATTR_SIZE && - attr->ia_size != i_size_read(inode)) { - int error = vmtruncate(inode, attr->ia_size); - if (error) - return error; - } - if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) @@ -135,6 +143,28 @@ int inode_setattr(struct inode * inode, struct iattr * attr) mode &= ~S_ISGID; inode->i_mode = mode; } +} +EXPORT_SYMBOL(generic_setattr); + +/* + * note this function is deprecated, the new truncate sequence should be + * used instead -- see eg. simple_setsize, generic_setattr. + */ +int inode_setattr(struct inode *inode, const struct iattr *attr) +{ + unsigned int ia_valid = attr->ia_valid; + + if (ia_valid & ATTR_SIZE && + attr->ia_size != i_size_read(inode)) { + int error; + + error = vmtruncate(inode, attr->ia_size); + if (error) + return error; + } + + generic_setattr(inode, attr); + mark_inode_dirty(inode); return 0; diff --git a/fs/autofs/root.c b/fs/autofs/root.c index 8713c7cfbc79..9a0520b50663 100644 --- a/fs/autofs/root.c +++ b/fs/autofs/root.c @@ -28,6 +28,7 @@ static int autofs_root_mkdir(struct inode *,struct dentry *,int); static int autofs_root_ioctl(struct inode *, struct file *,unsigned int,unsigned long); const struct file_operations autofs_root_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = autofs_root_readdir, .ioctl = autofs_root_ioctl, diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index d832062869f6..ba4a38b9c22f 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) */ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) { - struct autofs_dev_ioctl tmp, *ads; + struct autofs_dev_ioctl tmp; if (copy_from_user(&tmp, in, sizeof(tmp))) return ERR_PTR(-EFAULT); @@ -103,16 +103,7 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i if (tmp.size < sizeof(tmp)) return ERR_PTR(-EINVAL); - ads = kmalloc(tmp.size, GFP_KERNEL); - if (!ads) - return ERR_PTR(-ENOMEM); - - if (copy_from_user(ads, in, tmp.size)) { - kfree(ads); - return ERR_PTR(-EFAULT); - } - - return ads; + return memdup_user(in, tmp.size); } static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) diff --git a/fs/bad_inode.c b/fs/bad_inode.c index a05287a23f62..52e59bf4aa5f 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -93,8 +93,7 @@ static int bad_file_release(struct inode *inode, struct file *filp) return -EIO; } -static int bad_file_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int bad_file_fsync(struct file *file, int datasync) { return -EIO; } diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 8f73841fc974..d967e052b779 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -78,7 +78,7 @@ static int bfs_readdir(struct file *f, void *dirent, filldir_t filldir) const struct file_operations bfs_dir_operations = { .read = generic_read_dir, .readdir = bfs_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 2c5f9a0e5d72..63039ed9576f 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -990,10 +990,9 @@ static int elf_fdpic_map_file_constdisp_on_uclinux( /* clear any space allocated but not loaded */ if (phdr->p_filesz < phdr->p_memsz) { - ret = clear_user((void *) (seg->addr + 
phdr->p_filesz), - phdr->p_memsz - phdr->p_filesz); - if (ret) - return ret; + if (clear_user((void *) (seg->addr + phdr->p_filesz), + phdr->p_memsz - phdr->p_filesz)) + return -EFAULT; } if (mm) { @@ -1027,7 +1026,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, struct elf32_fdpic_loadseg *seg; struct elf32_phdr *phdr; unsigned long load_addr, delta_vaddr; - int loop, dvset, ret; + int loop, dvset; load_addr = params->load_addr; delta_vaddr = 0; @@ -1127,9 +1126,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, * PT_LOAD */ if (prot & PROT_WRITE && disp > 0) { kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp); - ret = clear_user((void __user *) maddr, disp); - if (ret) - return ret; + if (clear_user((void __user *) maddr, disp)) + return -EFAULT; maddr += disp; } @@ -1164,19 +1162,17 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, if (prot & PROT_WRITE && excess1 > 0) { kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr + phdr->p_filesz, excess1); - ret = clear_user((void __user *) maddr + phdr->p_filesz, - excess1); - if (ret) - return ret; + if (clear_user((void __user *) maddr + phdr->p_filesz, + excess1)) + return -EFAULT; } #else if (excess > 0) { kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr + phdr->p_filesz, excess); - ret = clear_user((void *) maddr + phdr->p_filesz, excess); - if (ret) - return ret; + if (clear_user((void *) maddr + phdr->p_filesz, excess)) + return -EFAULT; } #endif diff --git a/fs/block_dev.c b/fs/block_dev.c index 26e5f5026620..7346c96308a5 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -172,8 +172,9 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode), - iov, offset, nr_segs, blkdev_get_blocks, NULL); + return blockdev_direct_IO_no_locking_newtrunc(rw, iocb, inode, + I_BDEV(inode), iov, offset, nr_segs, + blkdev_get_blocks, NULL); } int __sync_blockdev(struct block_device *bdev, int wait) @@ -309,8 +310,8 @@ static int blkdev_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { *pagep = NULL; - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - blkdev_get_block); + return block_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, blkdev_get_block); } static int blkdev_write_end(struct file *file, struct address_space *mapping, @@ -358,12 +359,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) return retval; } -/* - * Filp is never NULL; the only case when ->fsync() is called with - * NULL first argument is nfsd_sync_dir() and that's not a directory. 
- */ - -int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync) +int blkdev_fsync(struct file *filp, int datasync) { struct inode *bd_inode = filp->f_mapping->host; struct block_device *bdev = I_BDEV(bd_inode); diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 462859a30141..7ec14097fef1 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -377,6 +377,7 @@ again: if (!list_empty(&worker->pending) || !list_empty(&worker->prio_pending)) { spin_unlock_irq(&worker->lock); + set_current_state(TASK_RUNNING); goto again; } diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 7a4dee199832..6ad63f17eca0 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -137,8 +137,8 @@ struct btrfs_inode { * of extent items we've reserved metadata for. */ spinlock_t accounting_lock; + atomic_t outstanding_extents; int reserved_extents; - int outstanding_extents; /* * ordered_data_close is set by truncate when a file that used @@ -151,6 +151,7 @@ struct btrfs_inode { * of these. */ unsigned ordered_data_close:1; + unsigned orphan_meta_reserved:1; unsigned dummy_inode:1; /* diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6795a713b205..0d1d966b0fe4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -280,7 +280,8 @@ int btrfs_block_can_be_shared(struct btrfs_root *root, static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, - struct extent_buffer *cow) + struct extent_buffer *cow, + int *last_ref) { u64 refs; u64 owner; @@ -366,6 +367,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, BUG_ON(ret); } clean_tree_block(trans, root, buf); + *last_ref = 1; } return 0; } @@ -392,6 +394,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_disk_key disk_key; struct extent_buffer *cow; int level; + int last_ref = 0; int unlock_orig = 0; u64 parent_start; @@ -442,7 +445,10 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, (unsigned long)btrfs_header_fsid(cow), BTRFS_FSID_SIZE); - update_ref_for_cow(trans, root, buf, cow); + update_ref_for_cow(trans, root, buf, cow, &last_ref); + + if (root->ref_cows) + btrfs_reloc_cow_block(trans, root, buf, cow); if (buf == root->node) { WARN_ON(parent && parent != buf); @@ -457,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, extent_buffer_get(cow); spin_unlock(&root->node_lock); - btrfs_free_tree_block(trans, root, buf->start, buf->len, - parent_start, root->root_key.objectid, level); + btrfs_free_tree_block(trans, root, buf, parent_start, + last_ref); free_extent_buffer(buf); add_root_to_dirty_list(root); } else { @@ -473,8 +479,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_set_node_ptr_generation(parent, parent_slot, trans->transid); btrfs_mark_buffer_dirty(parent); - btrfs_free_tree_block(trans, root, buf->start, buf->len, - parent_start, root->root_key.objectid, level); + btrfs_free_tree_block(trans, root, buf, parent_start, + last_ref); } if (unlock_orig) btrfs_tree_unlock(buf); @@ -949,6 +955,22 @@ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, return bin_search(eb, key, level, slot); } +static void root_add_used(struct btrfs_root *root, u32 size) +{ + spin_lock(&root->accounting_lock); + btrfs_set_root_used(&root->root_item, + btrfs_root_used(&root->root_item) + size); + spin_unlock(&root->accounting_lock); +} + +static void 
root_sub_used(struct btrfs_root *root, u32 size) +{ + spin_lock(&root->accounting_lock); + btrfs_set_root_used(&root->root_item, + btrfs_root_used(&root->root_item) - size); + spin_unlock(&root->accounting_lock); +} + /* given a node and slot number, this reads the blocks it points to. The * extent buffer is returned with a reference taken (but unlocked). * NULL is returned on error. @@ -1019,7 +1041,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, btrfs_tree_lock(child); btrfs_set_lock_blocking(child); ret = btrfs_cow_block(trans, root, child, mid, 0, &child); - BUG_ON(ret); + if (ret) { + btrfs_tree_unlock(child); + free_extent_buffer(child); + goto enospc; + } spin_lock(&root->node_lock); root->node = child; @@ -1034,11 +1060,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, btrfs_tree_unlock(mid); /* once for the path */ free_extent_buffer(mid); - ret = btrfs_free_tree_block(trans, root, mid->start, mid->len, - 0, root->root_key.objectid, level); + + root_sub_used(root, mid->len); + btrfs_free_tree_block(trans, root, mid, 0, 1); /* once for the root ptr */ free_extent_buffer(mid); - return ret; + return 0; } if (btrfs_header_nritems(mid) > BTRFS_NODEPTRS_PER_BLOCK(root) / 4) @@ -1088,23 +1115,16 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, if (wret < 0 && wret != -ENOSPC) ret = wret; if (btrfs_header_nritems(right) == 0) { - u64 bytenr = right->start; - u32 blocksize = right->len; - clean_tree_block(trans, root, right); btrfs_tree_unlock(right); - free_extent_buffer(right); - right = NULL; wret = del_ptr(trans, root, path, level + 1, pslot + 1); if (wret) ret = wret; - wret = btrfs_free_tree_block(trans, root, - bytenr, blocksize, 0, - root->root_key.objectid, - level); - if (wret) - ret = wret; + root_sub_used(root, right->len); + btrfs_free_tree_block(trans, root, right, 0, 1); + free_extent_buffer(right); + right = NULL; } else { struct btrfs_disk_key right_key; btrfs_node_key(right, &right_key, 0); @@ -1136,21 +1156,15 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, BUG_ON(wret == 1); } if (btrfs_header_nritems(mid) == 0) { - /* we've managed to empty the middle node, drop it */ - u64 bytenr = mid->start; - u32 blocksize = mid->len; - clean_tree_block(trans, root, mid); btrfs_tree_unlock(mid); - free_extent_buffer(mid); - mid = NULL; wret = del_ptr(trans, root, path, level + 1, pslot); if (wret) ret = wret; - wret = btrfs_free_tree_block(trans, root, bytenr, blocksize, - 0, root->root_key.objectid, level); - if (wret) - ret = wret; + root_sub_used(root, mid->len); + btrfs_free_tree_block(trans, root, mid, 0, 1); + free_extent_buffer(mid); + mid = NULL; } else { /* update the parent key to reflect our changes */ struct btrfs_disk_key mid_key; @@ -1590,7 +1604,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, btrfs_release_path(NULL, p); ret = -EAGAIN; - tmp = read_tree_block(root, blocknr, blocksize, gen); + tmp = read_tree_block(root, blocknr, blocksize, 0); if (tmp) { /* * If the read above didn't mark this buffer up to date, @@ -1740,7 +1754,6 @@ again: p->nodes[level + 1], p->slots[level + 1], &b); if (err) { - free_extent_buffer(b); ret = err; goto done; } @@ -2076,6 +2089,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, if (IS_ERR(c)) return PTR_ERR(c); + root_add_used(root, root->nodesize); + memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_nritems(c, 1); btrfs_set_header_level(c, level); @@ -2134,6 +2149,7 @@ 
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root int nritems; BUG_ON(!path->nodes[level]); + btrfs_assert_tree_locked(path->nodes[level]); lower = path->nodes[level]; nritems = btrfs_header_nritems(lower); BUG_ON(slot > nritems); @@ -2202,6 +2218,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans, if (IS_ERR(split)) return PTR_ERR(split); + root_add_used(root, root->nodesize); + memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_level(split, btrfs_header_level(c)); btrfs_set_header_bytenr(split, split->start); @@ -2415,6 +2433,9 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans, if (left_nritems) btrfs_mark_buffer_dirty(left); + else + clean_tree_block(trans, root, left); + btrfs_mark_buffer_dirty(right); btrfs_item_key(right, &disk_key, 0); @@ -2660,6 +2681,8 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(left); if (right_nritems) btrfs_mark_buffer_dirty(right); + else + clean_tree_block(trans, root, right); btrfs_item_key(right, &disk_key, 0); wret = fixup_low_keys(trans, root, path, &disk_key, 1); @@ -2669,8 +2692,6 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans, /* then fixup the leaf pointer in the path */ if (path->slots[0] < push_items) { path->slots[0] += old_left_nritems; - if (btrfs_header_nritems(path->nodes[0]) == 0) - clean_tree_block(trans, root, path->nodes[0]); btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = left; @@ -2932,10 +2953,10 @@ again: right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, root->root_key.objectid, &disk_key, 0, l->start, 0); - if (IS_ERR(right)) { - BUG_ON(1); + if (IS_ERR(right)) return PTR_ERR(right); - } + + root_add_used(root, root->leafsize); memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(right, right->start); @@ -3054,7 +3075,8 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, btrfs_set_path_blocking(path); ret = split_leaf(trans, root, &key, path, ins_len, 1); - BUG_ON(ret); + if (ret) + goto err; path->keep_locks = 0; btrfs_unlock_up_safe(path, 1); @@ -3796,9 +3818,10 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans, */ btrfs_unlock_up_safe(path, 0); - ret = btrfs_free_tree_block(trans, root, leaf->start, leaf->len, - 0, root->root_key.objectid, 0); - return ret; + root_sub_used(root, leaf->len); + + btrfs_free_tree_block(trans, root, leaf, 0, 1); + return 0; } /* * delete the item at the leaf level in path. 
If that empties @@ -3865,6 +3888,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (leaf == root->node) { btrfs_set_header_level(leaf, 0); } else { + btrfs_set_path_blocking(path); + clean_tree_block(trans, root, leaf); ret = btrfs_del_leaf(trans, root, path, leaf); BUG_ON(ret); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 746a7248678e..29c20092847e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -34,6 +34,7 @@ struct btrfs_trans_handle; struct btrfs_transaction; +struct btrfs_pending_snapshot; extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_transaction_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; @@ -663,6 +664,7 @@ struct btrfs_csum_item { #define BTRFS_BLOCK_GROUP_RAID1 (1 << 4) #define BTRFS_BLOCK_GROUP_DUP (1 << 5) #define BTRFS_BLOCK_GROUP_RAID10 (1 << 6) +#define BTRFS_NR_RAID_TYPES 5 struct btrfs_block_group_item { __le64 used; @@ -674,42 +676,46 @@ struct btrfs_space_info { u64 flags; u64 total_bytes; /* total bytes in the space */ - u64 bytes_used; /* total bytes used on disk */ + u64 bytes_used; /* total bytes used, + this does't take mirrors into account */ u64 bytes_pinned; /* total bytes pinned, will be freed when the transaction finishes */ u64 bytes_reserved; /* total bytes the allocator has reserved for current allocations */ u64 bytes_readonly; /* total bytes that are read only */ - u64 bytes_super; /* total bytes reserved for the super blocks */ - u64 bytes_root; /* the number of bytes needed to commit a - transaction */ + u64 bytes_may_use; /* number of bytes that may be used for delalloc/allocations */ - u64 bytes_delalloc; /* number of bytes currently reserved for - delayed allocation */ + u64 disk_used; /* total bytes used on disk */ int full; /* indicates that we cannot allocate any more chunks for this space */ int force_alloc; /* set if we need to force a chunk alloc for this space */ - int force_delalloc; /* make people start doing filemap_flush until - we're under a threshold */ struct list_head list; - /* for controlling how we free up space for allocations */ - wait_queue_head_t allocate_wait; - wait_queue_head_t flush_wait; - int allocating_chunk; - int flushing; - /* for block groups in our same type */ - struct list_head block_groups; + struct list_head block_groups[BTRFS_NR_RAID_TYPES]; spinlock_t lock; struct rw_semaphore groups_sem; atomic_t caching_threads; }; +struct btrfs_block_rsv { + u64 size; + u64 reserved; + u64 freed[2]; + struct btrfs_space_info *space_info; + struct list_head list; + spinlock_t lock; + atomic_t usage; + unsigned int priority:8; + unsigned int durable:1; + unsigned int refill_used:1; + unsigned int full:1; +}; + /* * free clusters are used to claim free space in relatively large chunks, * allowing us to do less seeky writes. 
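
The new struct btrfs_block_rsv above, together with the reservation helpers declared a little further on in ctree.h (btrfs_alloc_block_rsv, btrfs_block_rsv_add, btrfs_block_rsv_release, btrfs_free_block_rsv), replaces the old per-item metadata reservation calls. A minimal, hypothetical sketch of how a caller might drive that API (example_reserve_metadata is invented for illustration; error handling is reduced to the essentials):

    static int example_reserve_metadata(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u64 num_bytes)
    {
            struct btrfs_block_rsv *rsv;
            int retries = 0;
            int ret;

            rsv = btrfs_alloc_block_rsv(root);
            if (!rsv)
                    return -ENOMEM;

            /* fill the reservation from the root's space_info */
            ret = btrfs_block_rsv_add(trans, root, rsv, num_bytes, &retries);
            if (ret) {
                    btrfs_free_block_rsv(root, rsv);
                    return ret;
            }

            /* ... perform the metadata update against this reservation ... */

            /* return unused bytes to the space_info and drop the rsv */
            btrfs_block_rsv_release(root, rsv, num_bytes);
            btrfs_free_block_rsv(root, rsv);
            return 0;
    }
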
They are used for all metadata @@ -760,6 +766,7 @@ struct btrfs_block_group_cache { spinlock_t lock; u64 pinned; u64 reserved; + u64 reserved_pinned; u64 bytes_super; u64 flags; u64 sectorsize; @@ -825,6 +832,22 @@ struct btrfs_fs_info { /* logical->physical extent mapping */ struct btrfs_mapping_tree mapping_tree; + /* block reservation for extent, checksum and root tree */ + struct btrfs_block_rsv global_block_rsv; + /* block reservation for delay allocation */ + struct btrfs_block_rsv delalloc_block_rsv; + /* block reservation for metadata operations */ + struct btrfs_block_rsv trans_block_rsv; + /* block reservation for chunk tree */ + struct btrfs_block_rsv chunk_block_rsv; + + struct btrfs_block_rsv empty_block_rsv; + + /* list of block reservations that cross multiple transactions */ + struct list_head durable_block_rsv_list; + + struct mutex durable_block_rsv_mutex; + u64 generation; u64 last_trans_committed; @@ -927,7 +950,6 @@ struct btrfs_fs_info { struct btrfs_workers endio_meta_write_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; - struct btrfs_workers enospc_workers; /* * fixup workers take dirty pages that didn't properly go through * the cow mechanism and make them safe to write. It happens @@ -943,6 +965,7 @@ struct btrfs_fs_info { int do_barriers; int closing; int log_root_recovering; + int enospc_unlink; u64 total_pinned; @@ -1012,6 +1035,9 @@ struct btrfs_root { struct completion kobj_unregister; struct mutex objectid_mutex; + spinlock_t accounting_lock; + struct btrfs_block_rsv *block_rsv; + struct mutex log_mutex; wait_queue_head_t log_writer_wait; wait_queue_head_t log_commit_wait[2]; @@ -1043,7 +1069,6 @@ struct btrfs_root { int ref_cows; int track_dirty; int in_radix; - int clean_orphans; u64 defrag_trans_start; struct btrfs_key defrag_progress; @@ -1057,8 +1082,11 @@ struct btrfs_root { struct list_head root_list; - spinlock_t list_lock; + spinlock_t orphan_lock; struct list_head orphan_list; + struct btrfs_block_rsv *orphan_block_rsv; + int orphan_item_inserted; + int orphan_cleanup_state; spinlock_t inode_lock; /* red-black tree that keeps track of in-memory inodes */ @@ -1965,6 +1993,9 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); +int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u64 *refs, u64 *flags); int btrfs_pin_extent(struct btrfs_root *root, u64 bytenr, u64 num, int reserved); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, @@ -1984,10 +2015,10 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, u64 parent, u64 root_objectid, struct btrfs_disk_key *key, int level, u64 hint, u64 empty_size); -int btrfs_free_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 bytenr, u32 blocksize, - u64 parent, u64 root_objectid, int level); +void btrfs_free_tree_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct extent_buffer *buf, + u64 parent, int last_ref); struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u32 blocksize, @@ -2041,27 +2072,49 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 size); int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 
group_start); -int btrfs_prepare_block_group_relocation(struct btrfs_root *root, - struct btrfs_block_group_cache *group); - u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); - -int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items); -int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items); -int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root, - struct inode *inode, int num_items); -int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root, - struct inode *inode, int num_items); -int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, - u64 bytes); -void btrfs_free_reserved_data_space(struct btrfs_root *root, - struct inode *inode, u64 bytes); -void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, - u64 bytes); -void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, - u64 bytes); +int btrfs_check_data_free_space(struct inode *inode, u64 bytes); +void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes); +int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + int num_items, int *retries); +void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root); +int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, + struct inode *inode); +void btrfs_orphan_release_metadata(struct inode *inode); +int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending); +int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); +void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); +int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes); +void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes); +void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv); +struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root); +void btrfs_free_block_rsv(struct btrfs_root *root, + struct btrfs_block_rsv *rsv); +void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *rsv); +int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 num_bytes, int *retries); +int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 min_reserved, int min_factor); +int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, + struct btrfs_block_rsv *dst_rsv, + u64 num_bytes); +void btrfs_block_rsv_release(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 num_bytes); +int btrfs_set_block_group_ro(struct btrfs_root *root, + struct btrfs_block_group_cache *cache); +int btrfs_set_block_group_rw(struct btrfs_root *root, + struct btrfs_block_group_cache *cache); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); @@ -2152,7 +2205,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); -int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref); +int 
btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref); int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, @@ -2245,6 +2299,12 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, u64 inode_objectid, u64 ref_objectid, u64 *index); +struct btrfs_inode_ref * +btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + const char *name, int name_len, + u64 inode_objectid, u64 ref_objectid, int mod); int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid); @@ -2257,6 +2317,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 len); int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst); +int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, + struct bio *bio, u64 logical_offset, u32 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, @@ -2311,6 +2373,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); +int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, @@ -2349,10 +2412,20 @@ int btrfs_update_inode(struct btrfs_trans_handle *trans, int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); void btrfs_orphan_cleanup(struct btrfs_root *root); +void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending, + u64 *bytes_to_reserve); +void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending); +void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_cont_expand(struct inode *inode, loff_t size); int btrfs_invalidate_inodes(struct btrfs_root *root); void btrfs_add_delayed_iput(struct inode *inode); void btrfs_run_delayed_iputs(struct btrfs_root *root); +int btrfs_prealloc_file_range(struct inode *inode, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint); extern const struct dentry_operations btrfs_dentry_operations; /* ioctl.c */ @@ -2361,7 +2434,7 @@ void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); /* file.c */ -int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync); +int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); int btrfs_check_file(struct btrfs_root *root, struct inode *inode); @@ -2409,4 +2482,12 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_recover_relocation(struct btrfs_root *root); int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); +void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct extent_buffer *buf, + struct extent_buffer *cow); +void btrfs_reloc_pre_snapshot(struct 
btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending, + u64 *bytes_to_reserve); +void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending); #endif diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 902ce507c4e3..e807b143b857 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -318,107 +318,6 @@ out: return ret; } -/* - * helper function to lookup reference count and flags of extent. - * - * the head node for delayed ref is used to store the sum of all the - * reference count modifications queued up in the rbtree. the head - * node may also store the extent flags to set. This way you can check - * to see what the reference count and extent flags would be if all of - * the delayed refs are not processed. - */ -int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 bytenr, - u64 num_bytes, u64 *refs, u64 *flags) -{ - struct btrfs_delayed_ref_node *ref; - struct btrfs_delayed_ref_head *head; - struct btrfs_delayed_ref_root *delayed_refs; - struct btrfs_path *path; - struct btrfs_extent_item *ei; - struct extent_buffer *leaf; - struct btrfs_key key; - u32 item_size; - u64 num_refs; - u64 extent_flags; - int ret; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - key.objectid = bytenr; - key.type = BTRFS_EXTENT_ITEM_KEY; - key.offset = num_bytes; - delayed_refs = &trans->transaction->delayed_refs; -again: - ret = btrfs_search_slot(trans, root->fs_info->extent_root, - &key, path, 0, 0); - if (ret < 0) - goto out; - - if (ret == 0) { - leaf = path->nodes[0]; - item_size = btrfs_item_size_nr(leaf, path->slots[0]); - if (item_size >= sizeof(*ei)) { - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_item); - num_refs = btrfs_extent_refs(leaf, ei); - extent_flags = btrfs_extent_flags(leaf, ei); - } else { -#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 - struct btrfs_extent_item_v0 *ei0; - BUG_ON(item_size != sizeof(*ei0)); - ei0 = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_item_v0); - num_refs = btrfs_extent_refs_v0(leaf, ei0); - /* FIXME: this isn't correct for data */ - extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; -#else - BUG(); -#endif - } - BUG_ON(num_refs == 0); - } else { - num_refs = 0; - extent_flags = 0; - ret = 0; - } - - spin_lock(&delayed_refs->lock); - ref = find_ref_head(&delayed_refs->root, bytenr, NULL); - if (ref) { - head = btrfs_delayed_node_to_head(ref); - if (!mutex_trylock(&head->mutex)) { - atomic_inc(&ref->refs); - spin_unlock(&delayed_refs->lock); - - btrfs_release_path(root->fs_info->extent_root, path); - - mutex_lock(&head->mutex); - mutex_unlock(&head->mutex); - btrfs_put_delayed_ref(ref); - goto again; - } - if (head->extent_op && head->extent_op->update_flags) - extent_flags |= head->extent_op->flags_to_set; - else - BUG_ON(num_refs == 0); - - num_refs += ref->ref_mod; - mutex_unlock(&head->mutex); - } - WARN_ON(num_refs == 0); - if (refs) - *refs = num_refs; - if (flags) - *flags = extent_flags; -out: - spin_unlock(&delayed_refs->lock); - btrfs_free_path(path); - return ret; -} - /* * helper function to update an extent delayed ref in the * rbtree. 
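
A pattern repeated across the filesystem hunks in this diff (v9fs, adfs, affs, afs, btrfs and others) is the ->fsync prototype losing its dentry argument: implementations now take (struct file *, int datasync) and recover the inode from the file themselves. A minimal, hypothetical conversion following the same pattern as the affs hunk above (examplefs_fsync is invented for illustration):

    static int examplefs_fsync(struct file *file, int datasync)
    {
            /* the dentry is no longer passed in; go through the file instead */
            struct inode *inode = file->f_mapping->host;

            return write_inode_now(inode, 0);
    }
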
existing and update must both have the same diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index f6fc67ddad36..50e3cf92fbda 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -167,9 +167,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 bytenr, - u64 num_bytes, u64 *refs, u64 *flags); int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, u64 orig_parent, u64 parent, u64 orig_ref_root, u64 ref_root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index feca04197d02..f3b287c22caf 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -74,6 +74,11 @@ struct async_submit_bio { int rw; int mirror_num; unsigned long bio_flags; + /* + * bio_offset is optional, can be used if the pages in the bio + * can't tell us where in the file the bio should go + */ + u64 bio_offset; struct btrfs_work work; }; @@ -534,7 +539,8 @@ static void run_one_async_start(struct btrfs_work *work) async = container_of(work, struct async_submit_bio, work); fs_info = BTRFS_I(async->inode)->root->fs_info; async->submit_bio_start(async->inode, async->rw, async->bio, - async->mirror_num, async->bio_flags); + async->mirror_num, async->bio_flags, + async->bio_offset); } static void run_one_async_done(struct btrfs_work *work) @@ -556,7 +562,8 @@ static void run_one_async_done(struct btrfs_work *work) wake_up(&fs_info->async_submit_wait); async->submit_bio_done(async->inode, async->rw, async->bio, - async->mirror_num, async->bio_flags); + async->mirror_num, async->bio_flags, + async->bio_offset); } static void run_one_async_free(struct btrfs_work *work) @@ -570,6 +577,7 @@ static void run_one_async_free(struct btrfs_work *work) int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags, + u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done) { @@ -592,6 +600,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, async->work.flags = 0; async->bio_flags = bio_flags; + async->bio_offset = bio_offset; atomic_inc(&fs_info->nr_async_submits); @@ -627,7 +636,8 @@ static int btree_csum_one_bio(struct bio *bio) static int __btree_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, - unsigned long bio_flags) + unsigned long bio_flags, + u64 bio_offset) { /* * when we're called for a write, we're already in the async @@ -638,7 +648,8 @@ static int __btree_submit_bio_start(struct inode *inode, int rw, } static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, unsigned long bio_flags, + u64 bio_offset) { /* * when we're called for a write, we're already in the async @@ -648,7 +659,8 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, unsigned long bio_flags, + u64 bio_offset) { int ret; @@ -671,6 +683,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, */ return 
btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, 0, + bio_offset, __btree_submit_bio_start, __btree_submit_bio_done); } @@ -894,7 +907,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->ref_cows = 0; root->track_dirty = 0; root->in_radix = 0; - root->clean_orphans = 0; + root->orphan_item_inserted = 0; + root->orphan_cleanup_state = 0; root->fs_info = fs_info; root->objectid = objectid; @@ -903,13 +917,16 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->name = NULL; root->in_sysfs = 0; root->inode_tree = RB_ROOT; + root->block_rsv = NULL; + root->orphan_block_rsv = NULL; INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->orphan_list); INIT_LIST_HEAD(&root->root_list); spin_lock_init(&root->node_lock); - spin_lock_init(&root->list_lock); + spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); + spin_lock_init(&root->accounting_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); init_waitqueue_head(&root->log_writer_wait); @@ -968,42 +985,6 @@ static int find_and_setup_root(struct btrfs_root *tree_root, return 0; } -int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info) -{ - struct extent_buffer *eb; - struct btrfs_root *log_root_tree = fs_info->log_root_tree; - u64 start = 0; - u64 end = 0; - int ret; - - if (!log_root_tree) - return 0; - - while (1) { - ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, - 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW); - if (ret) - break; - - clear_extent_bits(&log_root_tree->dirty_log_pages, start, end, - EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); - } - eb = fs_info->log_root_tree->node; - - WARN_ON(btrfs_header_level(eb) != 0); - WARN_ON(btrfs_header_nritems(eb) != 0); - - ret = btrfs_free_reserved_extent(fs_info->tree_root, - eb->start, eb->len); - BUG_ON(ret); - - free_extent_buffer(eb); - kfree(fs_info->log_root_tree); - fs_info->log_root_tree = NULL; - return 0; -} - static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { @@ -1191,19 +1172,23 @@ again: if (root) return root; - ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid); - if (ret == 0) - ret = -ENOENT; - if (ret < 0) - return ERR_PTR(ret); - root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location); if (IS_ERR(root)) return root; - WARN_ON(btrfs_root_refs(&root->root_item) == 0); set_anon_super(&root->anon_super, NULL); + if (btrfs_root_refs(&root->root_item) == 0) { + ret = -ENOENT; + goto fail; + } + + ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid); + if (ret < 0) + goto fail; + if (ret == 0) + root->orphan_item_inserted = 1; + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); if (ret) goto fail; @@ -1212,10 +1197,9 @@ again: ret = radix_tree_insert(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, root); - if (ret == 0) { + if (ret == 0) root->in_radix = 1; - root->clean_orphans = 1; - } + spin_unlock(&fs_info->fs_roots_radix_lock); radix_tree_preload_end(); if (ret) { @@ -1461,10 +1445,6 @@ static int cleaner_kthread(void *arg) struct btrfs_root *root = arg; do { - smp_mb(); - if (root->fs_info->closing) - break; - vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); if (!(root->fs_info->sb->s_flags & MS_RDONLY) && @@ -1477,11 +1457,9 @@ static int cleaner_kthread(void *arg) if (freezing(current)) { refrigerator(); } else { - smp_mb(); - if (root->fs_info->closing) - break; 
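
The cleaner_kthread and transaction_kthread hunks around this point drop the fs_info->closing checks and instead re-check kthread_should_stop() after setting the task state, so a kthread_stop() racing with the check still leaves the thread runnable rather than asleep. The general idiom, sketched outside of btrfs (do_periodic_work is a stand-in for the real per-iteration work):

    while (!kthread_should_stop()) {
            do_periodic_work();                     /* hypothetical work step */

            set_current_state(TASK_INTERRUPTIBLE);  /* announce intent to sleep */
            if (!kthread_should_stop())             /* re-check after state change */
                    schedule_timeout(HZ * 30);
            __set_current_state(TASK_RUNNING);
    }
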
set_current_state(TASK_INTERRUPTIBLE); - schedule(); + if (!kthread_should_stop()) + schedule(); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); @@ -1493,36 +1471,40 @@ static int transaction_kthread(void *arg) struct btrfs_root *root = arg; struct btrfs_trans_handle *trans; struct btrfs_transaction *cur; + u64 transid; unsigned long now; unsigned long delay; int ret; do { - smp_mb(); - if (root->fs_info->closing) - break; - delay = HZ * 30; vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->new_trans_lock); cur = root->fs_info->running_transaction; if (!cur) { - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->new_trans_lock); goto sleep; } now = get_seconds(); - if (now < cur->start_time || now - cur->start_time < 30) { - mutex_unlock(&root->fs_info->trans_mutex); + if (!cur->blocked && + (now < cur->start_time || now - cur->start_time < 30)) { + spin_unlock(&root->fs_info->new_trans_lock); delay = HZ * 5; goto sleep; } - mutex_unlock(&root->fs_info->trans_mutex); - trans = btrfs_start_transaction(root, 1); - ret = btrfs_commit_transaction(trans, root); + transid = cur->transid; + spin_unlock(&root->fs_info->new_trans_lock); + trans = btrfs_join_transaction(root, 1); + if (transid == trans->transid) { + ret = btrfs_commit_transaction(trans, root); + BUG_ON(ret); + } else { + btrfs_end_transaction(trans, root); + } sleep: wake_up_process(root->fs_info->cleaner_kthread); mutex_unlock(&root->fs_info->transaction_kthread_mutex); @@ -1530,10 +1512,10 @@ sleep: if (freezing(current)) { refrigerator(); } else { - if (root->fs_info->closing) - break; set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(delay); + if (!kthread_should_stop() && + !btrfs_transaction_blocked(root->fs_info)) + schedule_timeout(delay); __set_current_state(TASK_RUNNING); } } while (!kthread_should_stop()); @@ -1620,6 +1602,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->space_info); btrfs_mapping_init(&fs_info->mapping_tree); + btrfs_init_block_rsv(&fs_info->global_block_rsv); + btrfs_init_block_rsv(&fs_info->delalloc_block_rsv); + btrfs_init_block_rsv(&fs_info->trans_block_rsv); + btrfs_init_block_rsv(&fs_info->chunk_block_rsv); + btrfs_init_block_rsv(&fs_info->empty_block_rsv); + INIT_LIST_HEAD(&fs_info->durable_block_rsv_list); + mutex_init(&fs_info->durable_block_rsv_mutex); atomic_set(&fs_info->nr_async_submits, 0); atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); @@ -1759,9 +1748,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size), &fs_info->generic_worker); - btrfs_init_workers(&fs_info->enospc_workers, "enospc", - fs_info->thread_pool_size, - &fs_info->generic_worker); /* a higher idle thresh on the submit workers makes it much more * likely that bios will be send down in a sane order to the @@ -1809,7 +1795,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_workers, 1); btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); - btrfs_start_workers(&fs_info->enospc_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -1912,17 +1897,18 @@ struct btrfs_root 
*open_ctree(struct super_block *sb, csum_root->track_dirty = 1; + fs_info->generation = generation; + fs_info->last_trans_committed = generation; + fs_info->data_alloc_profile = (u64)-1; + fs_info->metadata_alloc_profile = (u64)-1; + fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; + ret = btrfs_read_block_groups(extent_root); if (ret) { printk(KERN_ERR "Failed to read block groups: %d\n", ret); goto fail_block_groups; } - fs_info->generation = generation; - fs_info->last_trans_committed = generation; - fs_info->data_alloc_profile = (u64)-1; - fs_info->metadata_alloc_profile = (u64)-1; - fs_info->system_alloc_profile = fs_info->metadata_alloc_profile; fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) @@ -1977,6 +1963,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, BUG_ON(ret); if (!(sb->s_flags & MS_RDONLY)) { + ret = btrfs_cleanup_fs_roots(fs_info); + BUG_ON(ret); + ret = btrfs_recover_relocation(tree_root); if (ret < 0) { printk(KERN_WARNING @@ -2040,7 +2029,6 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); - btrfs_stop_workers(&fs_info->enospc_workers); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); @@ -2405,11 +2393,11 @@ int btrfs_commit_super(struct btrfs_root *root) down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); - trans = btrfs_start_transaction(root, 1); + trans = btrfs_join_transaction(root, 1); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ - trans = btrfs_start_transaction(root, 1); + trans = btrfs_join_transaction(root, 1); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); @@ -2426,15 +2414,15 @@ int close_ctree(struct btrfs_root *root) fs_info->closing = 1; smp_mb(); - kthread_stop(root->fs_info->transaction_kthread); - kthread_stop(root->fs_info->cleaner_kthread); - if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } + kthread_stop(root->fs_info->transaction_kthread); + kthread_stop(root->fs_info->cleaner_kthread); + fs_info->closing = 2; smp_mb(); @@ -2473,7 +2461,6 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); - btrfs_stop_workers(&fs_info->enospc_workers); btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index c958ecbc1916..88e825a0bf21 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -87,7 +87,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata); int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, - unsigned long bio_flags, + unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done); @@ -95,8 +95,6 @@ int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); int btrfs_write_tree_block(struct extent_buffer *buf); int btrfs_wait_tree_block_writeback(struct 
extent_buffer *buf); -int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, - struct btrfs_fs_info *fs_info); int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); int btrfs_add_log_tree(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c6a4f459ad76..b9080d71991a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -35,10 +35,9 @@ static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc, - int mark_free); -static int update_reserved_extents(struct btrfs_block_group_cache *cache, - u64 num_bytes, int reserve); + u64 bytenr, u64 num_bytes, int alloc); +static int update_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int reserve, int sinfo); static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, @@ -61,12 +60,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, static int do_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, u64 alloc_bytes, u64 flags, int force); -static int pin_down_bytes(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - u64 bytenr, u64 num_bytes, - int is_data, int reserved, - struct extent_buffer **must_clean); static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key); static void dump_space_info(struct btrfs_space_info *info, u64 bytes, @@ -91,8 +84,12 @@ void btrfs_get_block_group(struct btrfs_block_group_cache *cache) void btrfs_put_block_group(struct btrfs_block_group_cache *cache) { - if (atomic_dec_and_test(&cache->count)) + if (atomic_dec_and_test(&cache->count)) { + WARN_ON(cache->pinned > 0); + WARN_ON(cache->reserved > 0); + WARN_ON(cache->reserved_pinned > 0); kfree(cache); + } } /* @@ -319,7 +316,7 @@ static int caching_kthread(void *data) exclude_super_stripes(extent_root, block_group); spin_lock(&block_group->space_info->lock); - block_group->space_info->bytes_super += block_group->bytes_super; + block_group->space_info->bytes_readonly += block_group->bytes_super; spin_unlock(&block_group->space_info->lock); last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); @@ -507,6 +504,9 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, struct list_head *head = &info->space_info; struct btrfs_space_info *found; + flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM | + BTRFS_BLOCK_GROUP_METADATA; + rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { if (found->flags == flags) { @@ -609,6 +609,113 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) return ret; } +/* + * helper function to lookup reference count and flags of extent. + * + * the head node for delayed ref is used to store the sum of all the + * reference count modifications queued up in the rbtree. the head + * node may also store the extent flags to set. This way you can check + * to see what the reference count and extent flags would be if all of + * the delayed refs are not processed. 
+ */ +int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u64 *refs, u64 *flags) +{ + struct btrfs_delayed_ref_head *head; + struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_path *path; + struct btrfs_extent_item *ei; + struct extent_buffer *leaf; + struct btrfs_key key; + u32 item_size; + u64 num_refs; + u64 extent_flags; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + key.objectid = bytenr; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = num_bytes; + if (!trans) { + path->skip_locking = 1; + path->search_commit_root = 1; + } +again: + ret = btrfs_search_slot(trans, root->fs_info->extent_root, + &key, path, 0, 0); + if (ret < 0) + goto out_free; + + if (ret == 0) { + leaf = path->nodes[0]; + item_size = btrfs_item_size_nr(leaf, path->slots[0]); + if (item_size >= sizeof(*ei)) { + ei = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_item); + num_refs = btrfs_extent_refs(leaf, ei); + extent_flags = btrfs_extent_flags(leaf, ei); + } else { +#ifdef BTRFS_COMPAT_EXTENT_TREE_V0 + struct btrfs_extent_item_v0 *ei0; + BUG_ON(item_size != sizeof(*ei0)); + ei0 = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_item_v0); + num_refs = btrfs_extent_refs_v0(leaf, ei0); + /* FIXME: this isn't correct for data */ + extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF; +#else + BUG(); +#endif + } + BUG_ON(num_refs == 0); + } else { + num_refs = 0; + extent_flags = 0; + ret = 0; + } + + if (!trans) + goto out; + + delayed_refs = &trans->transaction->delayed_refs; + spin_lock(&delayed_refs->lock); + head = btrfs_find_delayed_ref_head(trans, bytenr); + if (head) { + if (!mutex_trylock(&head->mutex)) { + atomic_inc(&head->node.refs); + spin_unlock(&delayed_refs->lock); + + btrfs_release_path(root->fs_info->extent_root, path); + + mutex_lock(&head->mutex); + mutex_unlock(&head->mutex); + btrfs_put_delayed_ref(&head->node); + goto again; + } + if (head->extent_op && head->extent_op->update_flags) + extent_flags |= head->extent_op->flags_to_set; + else + BUG_ON(num_refs == 0); + + num_refs += head->node.ref_mod; + mutex_unlock(&head->mutex); + } + spin_unlock(&delayed_refs->lock); +out: + WARN_ON(num_refs == 0); + if (refs) + *refs = num_refs; + if (flags) + *flags = extent_flags; +out_free: + btrfs_free_path(path); + return ret; +} + /* * Back reference rules. 
Back refs have three main goals: * @@ -1871,7 +1978,6 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, return ret; } - /* helper function to actually process a single delayed ref entry */ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -1891,32 +1997,14 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans, BUG_ON(extent_op); head = btrfs_delayed_node_to_head(node); if (insert_reserved) { - int mark_free = 0; - struct extent_buffer *must_clean = NULL; - - ret = pin_down_bytes(trans, root, NULL, - node->bytenr, node->num_bytes, - head->is_data, 1, &must_clean); - if (ret > 0) - mark_free = 1; - - if (must_clean) { - clean_tree_block(NULL, root, must_clean); - btrfs_tree_unlock(must_clean); - free_extent_buffer(must_clean); - } + btrfs_pin_extent(root, node->bytenr, + node->num_bytes, 1); if (head->is_data) { ret = btrfs_del_csums(trans, root, node->bytenr, node->num_bytes); BUG_ON(ret); } - if (mark_free) { - ret = btrfs_free_reserved_extent(root, - node->bytenr, - node->num_bytes); - BUG_ON(ret); - } } mutex_unlock(&head->mutex); return 0; @@ -2347,6 +2435,8 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, ret = 0; out: btrfs_free_path(path); + if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) + WARN_ON(ret > 0); return ret; } @@ -2660,12 +2750,21 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, struct btrfs_space_info **space_info) { struct btrfs_space_info *found; + int i; + int factor; + + if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10)) + factor = 2; + else + factor = 1; found = __find_space_info(info, flags); if (found) { spin_lock(&found->lock); found->total_bytes += total_bytes; found->bytes_used += bytes_used; + found->disk_used += bytes_used * factor; found->full = 0; spin_unlock(&found->lock); *space_info = found; @@ -2675,18 +2774,20 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, if (!found) return -ENOMEM; - INIT_LIST_HEAD(&found->block_groups); + for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) + INIT_LIST_HEAD(&found->block_groups[i]); init_rwsem(&found->groups_sem); - init_waitqueue_head(&found->flush_wait); - init_waitqueue_head(&found->allocate_wait); spin_lock_init(&found->lock); - found->flags = flags; + found->flags = flags & (BTRFS_BLOCK_GROUP_DATA | + BTRFS_BLOCK_GROUP_SYSTEM | + BTRFS_BLOCK_GROUP_METADATA); found->total_bytes = total_bytes; found->bytes_used = bytes_used; + found->disk_used = bytes_used * factor; found->bytes_pinned = 0; found->bytes_reserved = 0; found->bytes_readonly = 0; - found->bytes_delalloc = 0; + found->bytes_may_use = 0; found->full = 0; found->force_alloc = 0; *space_info = found; @@ -2711,19 +2812,6 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) } } -static void set_block_group_readonly(struct btrfs_block_group_cache *cache) -{ - spin_lock(&cache->space_info->lock); - spin_lock(&cache->lock); - if (!cache->ro) { - cache->space_info->bytes_readonly += cache->key.offset - - btrfs_block_group_used(&cache->item); - cache->ro = 1; - } - spin_unlock(&cache->lock); - spin_unlock(&cache->space_info->lock); -} - u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) { u64 num_devices = root->fs_info->fs_devices->rw_devices; @@ -2752,722 +2840,946 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) return flags; } -static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data) -{ - struct 
btrfs_fs_info *info = root->fs_info; - u64 alloc_profile; - - if (data) { - alloc_profile = info->avail_data_alloc_bits & - info->data_alloc_profile; - data = BTRFS_BLOCK_GROUP_DATA | alloc_profile; - } else if (root == root->fs_info->chunk_root) { - alloc_profile = info->avail_system_alloc_bits & - info->system_alloc_profile; - data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile; - } else { - alloc_profile = info->avail_metadata_alloc_bits & - info->metadata_alloc_profile; - data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile; - } - - return btrfs_reduce_alloc_profile(root, data); -} - -void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) +static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) { - u64 alloc_target; - - alloc_target = btrfs_get_alloc_profile(root, 1); - BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, - alloc_target); + if (flags & BTRFS_BLOCK_GROUP_DATA) + flags |= root->fs_info->avail_data_alloc_bits & + root->fs_info->data_alloc_profile; + else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) + flags |= root->fs_info->avail_system_alloc_bits & + root->fs_info->system_alloc_profile; + else if (flags & BTRFS_BLOCK_GROUP_METADATA) + flags |= root->fs_info->avail_metadata_alloc_bits & + root->fs_info->metadata_alloc_profile; + return btrfs_reduce_alloc_profile(root, flags); } -static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items) +static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) { - u64 num_bytes; - int level; - - level = BTRFS_MAX_LEVEL - 2; - /* - * NOTE: these calculations are absolutely the worst possible case. - * This assumes that _every_ item we insert will require a new leaf, and - * that the tree has grown to its maximum level size. - */ + u64 flags; - /* - * for every item we insert we could insert both an extent item and a - * extent ref item. Then for ever item we insert, we will need to cow - * both the original leaf, plus the leaf to the left and right of it. - * - * Unless we are talking about the extent root, then we just want the - * number of items * 2, since we just need the extent item plus its ref. - */ - if (root == root->fs_info->extent_root) - num_bytes = num_items * 2; + if (data) + flags = BTRFS_BLOCK_GROUP_DATA; + else if (root == root->fs_info->chunk_root) + flags = BTRFS_BLOCK_GROUP_SYSTEM; else - num_bytes = (num_items + (2 * num_items)) * 3; + flags = BTRFS_BLOCK_GROUP_METADATA; - /* - * num_bytes is total number of leaves we could need times the leaf - * size, and then for every leaf we could end up cow'ing 2 nodes per - * level, down to the leaf level. - */ - num_bytes = (num_bytes * root->leafsize) + - (num_bytes * (level * 2)) * root->nodesize; + return get_alloc_profile(root, flags); +} - return num_bytes; +void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode) +{ + BTRFS_I(inode)->space_info = __find_space_info(root->fs_info, + BTRFS_BLOCK_GROUP_DATA); } /* - * Unreserve metadata space for delalloc. If we have less reserved credits than - * we have extents, this function does nothing. + * This will check the space that the inode allocates from to make sure we have + * enough space for bytes. 
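[Editor's sketch, not part of the patch] The reworked allocation-profile helpers above (get_alloc_profile() now takes block-group flags, and btrfs_get_alloc_profile() only picks the type bit) can be modelled in a few lines of userspace C. The flag values, struct layout and main() below are invented stand-ins, not the kernel's definitions; only the selection logic mirrors the hunk.

#include <stdint.h>
#include <stdio.h>

#define BG_DATA     (1ULL << 0)	/* placeholder flag values */
#define BG_SYSTEM   (1ULL << 1)
#define BG_METADATA (1ULL << 2)

struct fs_model {
	uint64_t avail_data_alloc_bits;
	uint64_t avail_system_alloc_bits;
	uint64_t avail_metadata_alloc_bits;
	uint64_t data_alloc_profile;		/* (u64)-1 after open_ctree */
	uint64_t system_alloc_profile;
	uint64_t metadata_alloc_profile;
};

/* same shape as get_alloc_profile(): start from the type bit, then OR in
 * whichever RAID/DUP bits are both available on disk and allowed */
static uint64_t model_get_alloc_profile(const struct fs_model *fs, uint64_t flags)
{
	if (flags & BG_DATA)
		flags |= fs->avail_data_alloc_bits & fs->data_alloc_profile;
	else if (flags & BG_SYSTEM)
		flags |= fs->avail_system_alloc_bits & fs->system_alloc_profile;
	else if (flags & BG_METADATA)
		flags |= fs->avail_metadata_alloc_bits & fs->metadata_alloc_profile;
	return flags;
}

int main(void)
{
	struct fs_model fs = {
		.avail_data_alloc_bits = 1ULL << 4,	/* pretend one RAID bit is available */
		.data_alloc_profile = (uint64_t)-1,
	};

	printf("data flags: %#llx\n",
	       (unsigned long long)model_get_alloc_profile(&fs, BG_DATA));
	return 0;
}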
*/ -int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root, - struct inode *inode, int num_items) +int btrfs_check_data_free_space(struct inode *inode, u64 bytes) { - struct btrfs_fs_info *info = root->fs_info; - struct btrfs_space_info *meta_sinfo; - u64 num_bytes; - u64 alloc_target; - bool bug = false; + struct btrfs_space_info *data_sinfo; + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 used; + int ret = 0, committed = 0; - /* get the space info for where the metadata will live */ - alloc_target = btrfs_get_alloc_profile(root, 0); - meta_sinfo = __find_space_info(info, alloc_target); + /* make sure bytes are sectorsize aligned */ + bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - num_bytes = calculate_bytes_needed(root->fs_info->extent_root, - num_items); + data_sinfo = BTRFS_I(inode)->space_info; + if (!data_sinfo) + goto alloc; - spin_lock(&meta_sinfo->lock); - spin_lock(&BTRFS_I(inode)->accounting_lock); - if (BTRFS_I(inode)->reserved_extents <= - BTRFS_I(inode)->outstanding_extents) { - spin_unlock(&BTRFS_I(inode)->accounting_lock); - spin_unlock(&meta_sinfo->lock); - return 0; - } - spin_unlock(&BTRFS_I(inode)->accounting_lock); +again: + /* make sure we have enough space to handle the data first */ + spin_lock(&data_sinfo->lock); + used = data_sinfo->bytes_used + data_sinfo->bytes_reserved + + data_sinfo->bytes_pinned + data_sinfo->bytes_readonly + + data_sinfo->bytes_may_use; + + if (used + bytes > data_sinfo->total_bytes) { + struct btrfs_trans_handle *trans; - BTRFS_I(inode)->reserved_extents -= num_items; - BUG_ON(BTRFS_I(inode)->reserved_extents < 0); + /* + * if we don't have enough free bytes in this space then we need + * to alloc a new chunk. + */ + if (!data_sinfo->full) { + u64 alloc_target; - if (meta_sinfo->bytes_delalloc < num_bytes) { - bug = true; - meta_sinfo->bytes_delalloc = 0; - } else { - meta_sinfo->bytes_delalloc -= num_bytes; - } - spin_unlock(&meta_sinfo->lock); + data_sinfo->force_alloc = 1; + spin_unlock(&data_sinfo->lock); +alloc: + alloc_target = btrfs_get_alloc_profile(root, 1); + trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); - BUG_ON(bug); + ret = do_chunk_alloc(trans, root->fs_info->extent_root, + bytes + 2 * 1024 * 1024, + alloc_target, 0); + btrfs_end_transaction(trans, root); + if (ret < 0) + return ret; - return 0; -} + if (!data_sinfo) { + btrfs_set_inode_space_info(root, inode); + data_sinfo = BTRFS_I(inode)->space_info; + } + goto again; + } + spin_unlock(&data_sinfo->lock); -static void check_force_delalloc(struct btrfs_space_info *meta_sinfo) -{ - u64 thresh; + /* commit the current transaction and try again */ + if (!committed && !root->fs_info->open_ioctl_trans) { + committed = 1; + trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); + ret = btrfs_commit_transaction(trans, root); + if (ret) + return ret; + goto again; + } - thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + - meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + - meta_sinfo->bytes_super + meta_sinfo->bytes_root + - meta_sinfo->bytes_may_use; +#if 0 /* I hope we never need this code again, just in case */ + printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " + "%llu bytes_reserved, " "%llu bytes_pinned, " + "%llu bytes_readonly, %llu may use %llu total\n", + (unsigned long long)bytes, + (unsigned long long)data_sinfo->bytes_used, + (unsigned long long)data_sinfo->bytes_reserved, + (unsigned long long)data_sinfo->bytes_pinned, + 
(unsigned long long)data_sinfo->bytes_readonly, + (unsigned long long)data_sinfo->bytes_may_use, + (unsigned long long)data_sinfo->total_bytes); +#endif + return -ENOSPC; + } + data_sinfo->bytes_may_use += bytes; + BTRFS_I(inode)->reserved_bytes += bytes; + spin_unlock(&data_sinfo->lock); - thresh = meta_sinfo->total_bytes - thresh; - thresh *= 80; - do_div(thresh, 100); - if (thresh <= meta_sinfo->bytes_delalloc) - meta_sinfo->force_delalloc = 1; - else - meta_sinfo->force_delalloc = 0; + return 0; } -struct async_flush { - struct btrfs_root *root; - struct btrfs_space_info *info; - struct btrfs_work work; -}; - -static noinline void flush_delalloc_async(struct btrfs_work *work) +/* + * called when we are clearing an delalloc extent from the + * inode's io_tree or there was an error for whatever reason + * after calling btrfs_check_data_free_space + */ +void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes) { - struct async_flush *async; - struct btrfs_root *root; - struct btrfs_space_info *info; - - async = container_of(work, struct async_flush, work); - root = async->root; - info = async->info; - - btrfs_start_delalloc_inodes(root, 0); - wake_up(&info->flush_wait); - btrfs_wait_ordered_extents(root, 0, 0); + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_space_info *data_sinfo; - spin_lock(&info->lock); - info->flushing = 0; - spin_unlock(&info->lock); - wake_up(&info->flush_wait); + /* make sure bytes are sectorsize aligned */ + bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - kfree(async); + data_sinfo = BTRFS_I(inode)->space_info; + spin_lock(&data_sinfo->lock); + data_sinfo->bytes_may_use -= bytes; + BTRFS_I(inode)->reserved_bytes -= bytes; + spin_unlock(&data_sinfo->lock); } -static void wait_on_flush(struct btrfs_space_info *info) +static void force_metadata_allocation(struct btrfs_fs_info *info) { - DEFINE_WAIT(wait); - u64 used; - - while (1) { - prepare_to_wait(&info->flush_wait, &wait, - TASK_UNINTERRUPTIBLE); - spin_lock(&info->lock); - if (!info->flushing) { - spin_unlock(&info->lock); - break; - } + struct list_head *head = &info->space_info; + struct btrfs_space_info *found; - used = info->bytes_used + info->bytes_reserved + - info->bytes_pinned + info->bytes_readonly + - info->bytes_super + info->bytes_root + - info->bytes_may_use + info->bytes_delalloc; - if (used < info->total_bytes) { - spin_unlock(&info->lock); - break; - } - spin_unlock(&info->lock); - schedule(); + rcu_read_lock(); + list_for_each_entry_rcu(found, head, list) { + if (found->flags & BTRFS_BLOCK_GROUP_METADATA) + found->force_alloc = 1; } - finish_wait(&info->flush_wait, &wait); + rcu_read_unlock(); } -static void flush_delalloc(struct btrfs_root *root, - struct btrfs_space_info *info) +static int should_alloc_chunk(struct btrfs_space_info *sinfo, + u64 alloc_bytes) { - struct async_flush *async; - bool wait = false; - - spin_lock(&info->lock); - - if (!info->flushing) - info->flushing = 1; - else - wait = true; - - spin_unlock(&info->lock); - - if (wait) { - wait_on_flush(info); - return; - } - - async = kzalloc(sizeof(*async), GFP_NOFS); - if (!async) - goto flush; - - async->root = root; - async->info = info; - async->work.func = flush_delalloc_async; + u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; - btrfs_queue_worker(&root->fs_info->enospc_workers, - &async->work); - wait_on_flush(info); - return; + if (sinfo->bytes_used + sinfo->bytes_reserved + + alloc_bytes + 256 * 1024 * 1024 < num_bytes) + return 0; -flush: - 
btrfs_start_delalloc_inodes(root, 0); - btrfs_wait_ordered_extents(root, 0, 0); + if (sinfo->bytes_used + sinfo->bytes_reserved + + alloc_bytes < div_factor(num_bytes, 8)) + return 0; - spin_lock(&info->lock); - info->flushing = 0; - spin_unlock(&info->lock); - wake_up(&info->flush_wait); + return 1; } -static int maybe_allocate_chunk(struct btrfs_root *root, - struct btrfs_space_info *info) +static int do_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, u64 alloc_bytes, + u64 flags, int force) { - struct btrfs_super_block *disk_super = &root->fs_info->super_copy; - struct btrfs_trans_handle *trans; - bool wait = false; + struct btrfs_space_info *space_info; + struct btrfs_fs_info *fs_info = extent_root->fs_info; int ret = 0; - u64 min_metadata; - u64 free_space; - free_space = btrfs_super_total_bytes(disk_super); - /* - * we allow the metadata to grow to a max of either 10gb or 5% of the - * space in the volume. - */ - min_metadata = min((u64)10 * 1024 * 1024 * 1024, - div64_u64(free_space * 5, 100)); - if (info->total_bytes >= min_metadata) { - spin_unlock(&info->lock); - return 0; - } + mutex_lock(&fs_info->chunk_mutex); - if (info->full) { - spin_unlock(&info->lock); - return 0; + flags = btrfs_reduce_alloc_profile(extent_root, flags); + + space_info = __find_space_info(extent_root->fs_info, flags); + if (!space_info) { + ret = update_space_info(extent_root->fs_info, flags, + 0, 0, &space_info); + BUG_ON(ret); } + BUG_ON(!space_info); - if (!info->allocating_chunk) { - info->force_alloc = 1; - info->allocating_chunk = 1; - } else { - wait = true; + spin_lock(&space_info->lock); + if (space_info->force_alloc) + force = 1; + if (space_info->full) { + spin_unlock(&space_info->lock); + goto out; } - spin_unlock(&info->lock); - - if (wait) { - wait_event(info->allocate_wait, - !info->allocating_chunk); - return 1; + if (!force && !should_alloc_chunk(space_info, alloc_bytes)) { + spin_unlock(&space_info->lock); + goto out; } + spin_unlock(&space_info->lock); - trans = btrfs_start_transaction(root, 1); - if (!trans) { - ret = -ENOMEM; - goto out; + /* + * if we're doing a data chunk, go ahead and make sure that + * we keep a reasonable number of metadata chunks allocated in the + * FS as well. 
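[Editor's sketch, not part of the patch] The new should_alloc_chunk() helper above replaces the open-coded threshold that used to live in do_chunk_alloc(): allocate a new chunk only once less than 256M of head room remains and usage has crossed roughly 80% of the writable space. A compilable model of that test, assuming div_factor(n, 8) behaves as n * 8 / 10 as in this era of the kernel:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_factor(uint64_t num, int factor)
{
	return num * factor / 10;
}

/* same test as the new should_alloc_chunk() above */
static int model_should_alloc_chunk(uint64_t total, uint64_t readonly,
				    uint64_t used, uint64_t reserved,
				    uint64_t alloc_bytes)
{
	uint64_t num_bytes = total - readonly;

	/* still 256M of head room beyond this allocation: no new chunk */
	if (used + reserved + alloc_bytes + 256ULL * 1024 * 1024 < num_bytes)
		return 0;
	/* below ~80% of the writable space: no new chunk */
	if (used + reserved + alloc_bytes < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

int main(void)
{
	uint64_t gb = 1024ULL * 1024 * 1024;

	printf("%d\n", model_should_alloc_chunk(10 * gb, 0, 1 * gb, 0, 64ULL << 20));
	printf("%d\n", model_should_alloc_chunk(10 * gb, 0, 9 * gb + (512ULL << 20), 0, 512ULL << 20));
	return 0;
}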
+ */ + if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { + fs_info->data_chunk_allocations++; + if (!(fs_info->data_chunk_allocations % + fs_info->metadata_ratio)) + force_metadata_allocation(fs_info); } - ret = do_chunk_alloc(trans, root->fs_info->extent_root, - 4096 + 2 * 1024 * 1024, - info->flags, 0); - btrfs_end_transaction(trans, root); + ret = btrfs_alloc_chunk(trans, extent_root, flags); + spin_lock(&space_info->lock); if (ret) - goto out; + space_info->full = 1; + else + ret = 1; + space_info->force_alloc = 0; + spin_unlock(&space_info->lock); out: - spin_lock(&info->lock); - info->allocating_chunk = 0; - spin_unlock(&info->lock); - wake_up(&info->allocate_wait); + mutex_unlock(&extent_root->fs_info->chunk_mutex); + return ret; +} - if (ret) +static int maybe_allocate_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_space_info *sinfo, u64 num_bytes) +{ + int ret; + int end_trans = 0; + + if (sinfo->full) return 0; - return 1; + + spin_lock(&sinfo->lock); + ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024); + spin_unlock(&sinfo->lock); + if (!ret) + return 0; + + if (!trans) { + trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); + end_trans = 1; + } + + ret = do_chunk_alloc(trans, root->fs_info->extent_root, + num_bytes + 2 * 1024 * 1024, + get_alloc_profile(root, sinfo->flags), 0); + + if (end_trans) + btrfs_end_transaction(trans, root); + + return ret == 1 ? 1 : 0; } /* - * Reserve metadata space for delalloc. + * shrink metadata reservation for delalloc */ -int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root, - struct inode *inode, int num_items) +static int shrink_delalloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 to_reclaim) +{ + struct btrfs_block_rsv *block_rsv; + u64 reserved; + u64 max_reclaim; + u64 reclaimed = 0; + int pause = 1; + int ret; + + block_rsv = &root->fs_info->delalloc_block_rsv; + spin_lock(&block_rsv->lock); + reserved = block_rsv->reserved; + spin_unlock(&block_rsv->lock); + + if (reserved == 0) + return 0; + + max_reclaim = min(reserved, to_reclaim); + + while (1) { + ret = btrfs_start_one_delalloc_inode(root, trans ? 
1 : 0); + if (!ret) { + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(pause); + pause <<= 1; + if (pause > HZ / 10) + pause = HZ / 10; + } else { + pause = 1; + } + + spin_lock(&block_rsv->lock); + if (reserved > block_rsv->reserved) + reclaimed = reserved - block_rsv->reserved; + reserved = block_rsv->reserved; + spin_unlock(&block_rsv->lock); + + if (reserved == 0 || reclaimed >= max_reclaim) + break; + + if (trans && trans->transaction->blocked) + return -EAGAIN; + } + return reclaimed >= to_reclaim; +} + +static int should_retry_reserve(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 num_bytes, int *retries) { - struct btrfs_fs_info *info = root->fs_info; - struct btrfs_space_info *meta_sinfo; - u64 num_bytes; - u64 used; - u64 alloc_target; - int flushed = 0; - int force_delalloc; + struct btrfs_space_info *space_info = block_rsv->space_info; + int ret; - /* get the space info for where the metadata will live */ - alloc_target = btrfs_get_alloc_profile(root, 0); - meta_sinfo = __find_space_info(info, alloc_target); + if ((*retries) > 2) + return -ENOSPC; - num_bytes = calculate_bytes_needed(root->fs_info->extent_root, - num_items); -again: - spin_lock(&meta_sinfo->lock); + ret = maybe_allocate_chunk(trans, root, space_info, num_bytes); + if (ret) + return 1; - force_delalloc = meta_sinfo->force_delalloc; + if (trans && trans->transaction->in_commit) + return -ENOSPC; - if (unlikely(!meta_sinfo->bytes_root)) - meta_sinfo->bytes_root = calculate_bytes_needed(root, 6); + ret = shrink_delalloc(trans, root, num_bytes); + if (ret) + return ret; - if (!flushed) - meta_sinfo->bytes_delalloc += num_bytes; + spin_lock(&space_info->lock); + if (space_info->bytes_pinned < num_bytes) + ret = 1; + spin_unlock(&space_info->lock); + if (ret) + return -ENOSPC; - used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + - meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + - meta_sinfo->bytes_super + meta_sinfo->bytes_root + - meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc; + (*retries)++; - if (used > meta_sinfo->total_bytes) { - flushed++; + if (trans) + return -EAGAIN; - if (flushed == 1) { - if (maybe_allocate_chunk(root, meta_sinfo)) - goto again; - flushed++; + trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); + ret = btrfs_commit_transaction(trans, root); + BUG_ON(ret); + + return 1; +} + +static int reserve_metadata_bytes(struct btrfs_block_rsv *block_rsv, + u64 num_bytes) +{ + struct btrfs_space_info *space_info = block_rsv->space_info; + u64 unused; + int ret = -ENOSPC; + + spin_lock(&space_info->lock); + unused = space_info->bytes_used + space_info->bytes_reserved + + space_info->bytes_pinned + space_info->bytes_readonly; + + if (unused < space_info->total_bytes) + unused = space_info->total_bytes - unused; + else + unused = 0; + + if (unused >= num_bytes) { + if (block_rsv->priority >= 10) { + space_info->bytes_reserved += num_bytes; + ret = 0; } else { - spin_unlock(&meta_sinfo->lock); + if ((unused + block_rsv->reserved) * + block_rsv->priority >= + (num_bytes + block_rsv->reserved) * 10) { + space_info->bytes_reserved += num_bytes; + ret = 0; + } } + } + spin_unlock(&space_info->lock); - if (flushed == 2) { - filemap_flush(inode->i_mapping); - goto again; - } else if (flushed == 3) { - flush_delalloc(root, meta_sinfo); - goto again; + return ret; +} + +static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_block_rsv 
*block_rsv; + if (root->ref_cows) + block_rsv = trans->block_rsv; + else + block_rsv = root->block_rsv; + + if (!block_rsv) + block_rsv = &root->fs_info->empty_block_rsv; + + return block_rsv; +} + +static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, + u64 num_bytes) +{ + int ret = -ENOSPC; + spin_lock(&block_rsv->lock); + if (block_rsv->reserved >= num_bytes) { + block_rsv->reserved -= num_bytes; + if (block_rsv->reserved < block_rsv->size) + block_rsv->full = 0; + ret = 0; + } + spin_unlock(&block_rsv->lock); + return ret; +} + +static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, + u64 num_bytes, int update_size) +{ + spin_lock(&block_rsv->lock); + block_rsv->reserved += num_bytes; + if (update_size) + block_rsv->size += num_bytes; + else if (block_rsv->reserved >= block_rsv->size) + block_rsv->full = 1; + spin_unlock(&block_rsv->lock); +} + +void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, + struct btrfs_block_rsv *dest, u64 num_bytes) +{ + struct btrfs_space_info *space_info = block_rsv->space_info; + + spin_lock(&block_rsv->lock); + if (num_bytes == (u64)-1) + num_bytes = block_rsv->size; + block_rsv->size -= num_bytes; + if (block_rsv->reserved >= block_rsv->size) { + num_bytes = block_rsv->reserved - block_rsv->size; + block_rsv->reserved = block_rsv->size; + block_rsv->full = 1; + } else { + num_bytes = 0; + } + spin_unlock(&block_rsv->lock); + + if (num_bytes > 0) { + if (dest) { + block_rsv_add_bytes(dest, num_bytes, 0); + } else { + spin_lock(&space_info->lock); + space_info->bytes_reserved -= num_bytes; + spin_unlock(&space_info->lock); } - spin_lock(&meta_sinfo->lock); - meta_sinfo->bytes_delalloc -= num_bytes; - spin_unlock(&meta_sinfo->lock); - printk(KERN_ERR "enospc, has %d, reserved %d\n", - BTRFS_I(inode)->outstanding_extents, - BTRFS_I(inode)->reserved_extents); - dump_space_info(meta_sinfo, 0, 0); - return -ENOSPC; } +} - BTRFS_I(inode)->reserved_extents += num_items; - check_force_delalloc(meta_sinfo); - spin_unlock(&meta_sinfo->lock); +static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src, + struct btrfs_block_rsv *dst, u64 num_bytes) +{ + int ret; - if (!flushed && force_delalloc) - filemap_flush(inode->i_mapping); + ret = block_rsv_use_bytes(src, num_bytes); + if (ret) + return ret; + block_rsv_add_bytes(dst, num_bytes, 1); return 0; } -/* - * unreserve num_items number of items worth of metadata space. This needs to - * be paired with btrfs_reserve_metadata_space. - * - * NOTE: if you have the option, run this _AFTER_ you do a - * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref - * oprations which will result in more used metadata, so we want to make sure we - * can do that without issue. 
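[Editor's sketch, not part of the patch] The block_rsv counters introduced above (block_rsv_use_bytes(), block_rsv_add_bytes(), block_rsv_release_bytes(), block_rsv_migrate_bytes()) carry most of the new ENOSPC logic, so a single-threaded model of the two core operations may help when reading the rest of the series. Locking, the backing space_info and the durable/freed machinery are deliberately left out; all names here are invented for the sketch.

#include <stdint.h>
#include <stdio.h>

struct rsv_model {
	uint64_t size;		/* how much the owner wants reserved */
	uint64_t reserved;	/* how much is actually reserved */
	int full;
};

/* mirrors block_rsv_use_bytes(): consume previously reserved space */
static int rsv_use(struct rsv_model *r, uint64_t n)
{
	if (r->reserved < n)
		return -1;			/* -ENOSPC in the kernel */
	r->reserved -= n;
	if (r->reserved < r->size)
		r->full = 0;
	return 0;
}

/* mirrors block_rsv_add_bytes(): refill, optionally growing the target size */
static void rsv_add(struct rsv_model *r, uint64_t n, int update_size)
{
	r->reserved += n;
	if (update_size)
		r->size += n;
	else if (r->reserved >= r->size)
		r->full = 1;
}

int main(void)
{
	struct rsv_model r = { 0, 0, 0 };

	rsv_add(&r, 1 << 20, 1);		/* reserve 1MiB and grow the target */
	printf("use: %d, left %llu\n", rsv_use(&r, 256 << 10),
	       (unsigned long long)r.reserved);
	return 0;
}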
- */ -int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items) +void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv) { - struct btrfs_fs_info *info = root->fs_info; - struct btrfs_space_info *meta_sinfo; - u64 num_bytes; + memset(rsv, 0, sizeof(*rsv)); + spin_lock_init(&rsv->lock); + atomic_set(&rsv->usage, 1); + rsv->priority = 6; + INIT_LIST_HEAD(&rsv->list); +} + +struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root) +{ + struct btrfs_block_rsv *block_rsv; + struct btrfs_fs_info *fs_info = root->fs_info; u64 alloc_target; - bool bug = false; - /* get the space info for where the metadata will live */ - alloc_target = btrfs_get_alloc_profile(root, 0); - meta_sinfo = __find_space_info(info, alloc_target); + block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); + if (!block_rsv) + return NULL; - num_bytes = calculate_bytes_needed(root, num_items); + btrfs_init_block_rsv(block_rsv); - spin_lock(&meta_sinfo->lock); - if (meta_sinfo->bytes_may_use < num_bytes) { - bug = true; - meta_sinfo->bytes_may_use = 0; - } else { - meta_sinfo->bytes_may_use -= num_bytes; - } - spin_unlock(&meta_sinfo->lock); + alloc_target = btrfs_get_alloc_profile(root, 0); + block_rsv->space_info = __find_space_info(fs_info, + BTRFS_BLOCK_GROUP_METADATA); - BUG_ON(bug); + return block_rsv; +} - return 0; +void btrfs_free_block_rsv(struct btrfs_root *root, + struct btrfs_block_rsv *rsv) +{ + if (rsv && atomic_dec_and_test(&rsv->usage)) { + btrfs_block_rsv_release(root, rsv, (u64)-1); + if (!rsv->durable) + kfree(rsv); + } } /* - * Reserve some metadata space for use. We'll calculate the worste case number - * of bytes that would be needed to modify num_items number of items. If we - * have space, fantastic, if not, you get -ENOSPC. Please call - * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of - * items you reserved, since whatever metadata you needed should have already - * been allocated. - * - * This will commit the transaction to make more space if we don't have enough - * metadata space. THe only time we don't do this is if we're reserving space - * inside of a transaction, then we will just return -ENOSPC and it is the - * callers responsibility to handle it properly. + * make the block_rsv struct be able to capture freed space. 
+ * the captured space will re-add to the the block_rsv struct + * after transaction commit */ -int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items) +void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *block_rsv) { - struct btrfs_fs_info *info = root->fs_info; - struct btrfs_space_info *meta_sinfo; - u64 num_bytes; - u64 used; - u64 alloc_target; - int retries = 0; + block_rsv->durable = 1; + mutex_lock(&fs_info->durable_block_rsv_mutex); + list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list); + mutex_unlock(&fs_info->durable_block_rsv_mutex); +} - /* get the space info for where the metadata will live */ - alloc_target = btrfs_get_alloc_profile(root, 0); - meta_sinfo = __find_space_info(info, alloc_target); +int btrfs_block_rsv_add(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 num_bytes, int *retries) +{ + int ret; - num_bytes = calculate_bytes_needed(root, num_items); + if (num_bytes == 0) + return 0; again: - spin_lock(&meta_sinfo->lock); + ret = reserve_metadata_bytes(block_rsv, num_bytes); + if (!ret) { + block_rsv_add_bytes(block_rsv, num_bytes, 1); + return 0; + } - if (unlikely(!meta_sinfo->bytes_root)) - meta_sinfo->bytes_root = calculate_bytes_needed(root, 6); + ret = should_retry_reserve(trans, root, block_rsv, num_bytes, retries); + if (ret > 0) + goto again; + + return ret; +} - if (!retries) - meta_sinfo->bytes_may_use += num_bytes; +int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 min_reserved, int min_factor) +{ + u64 num_bytes = 0; + int commit_trans = 0; + int ret = -ENOSPC; - used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved + - meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly + - meta_sinfo->bytes_super + meta_sinfo->bytes_root + - meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc; + if (!block_rsv) + return 0; - if (used > meta_sinfo->total_bytes) { - retries++; - if (retries == 1) { - if (maybe_allocate_chunk(root, meta_sinfo)) - goto again; - retries++; - } else { - spin_unlock(&meta_sinfo->lock); - } + spin_lock(&block_rsv->lock); + if (min_factor > 0) + num_bytes = div_factor(block_rsv->size, min_factor); + if (min_reserved > num_bytes) + num_bytes = min_reserved; - if (retries == 2) { - flush_delalloc(root, meta_sinfo); - goto again; + if (block_rsv->reserved >= num_bytes) { + ret = 0; + } else { + num_bytes -= block_rsv->reserved; + if (block_rsv->durable && + block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes) + commit_trans = 1; + } + spin_unlock(&block_rsv->lock); + if (!ret) + return 0; + + if (block_rsv->refill_used) { + ret = reserve_metadata_bytes(block_rsv, num_bytes); + if (!ret) { + block_rsv_add_bytes(block_rsv, num_bytes, 0); + return 0; } - spin_lock(&meta_sinfo->lock); - meta_sinfo->bytes_may_use -= num_bytes; - spin_unlock(&meta_sinfo->lock); + } - dump_space_info(meta_sinfo, 0, 0); - return -ENOSPC; + if (commit_trans) { + if (trans) + return -EAGAIN; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); + ret = btrfs_commit_transaction(trans, root); + return 0; } - check_force_delalloc(meta_sinfo); - spin_unlock(&meta_sinfo->lock); + WARN_ON(1); + printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", + block_rsv->size, block_rsv->reserved, + block_rsv->freed[0], block_rsv->freed[1]); - return 0; + return -ENOSPC; +} + +int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, + 
struct btrfs_block_rsv *dst_rsv, + u64 num_bytes) +{ + return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); +} + +void btrfs_block_rsv_release(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, + u64 num_bytes) +{ + struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; + if (global_rsv->full || global_rsv == block_rsv || + block_rsv->space_info != global_rsv->space_info) + global_rsv = NULL; + block_rsv_release_bytes(block_rsv, global_rsv, num_bytes); } /* - * This will check the space that the inode allocates from to make sure we have - * enough space for bytes. + * helper to calculate size of global block reservation. + * the desired value is sum of space used by extent tree, + * checksum tree and root tree */ -int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode, - u64 bytes) +static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) { - struct btrfs_space_info *data_sinfo; - u64 used; - int ret = 0, committed = 0, flushed = 0; + struct btrfs_space_info *sinfo; + u64 num_bytes; + u64 meta_used; + u64 data_used; + int csum_size = btrfs_super_csum_size(&fs_info->super_copy); +#if 0 + /* + * per tree used space accounting can be inaccuracy, so we + * can't rely on it. + */ + spin_lock(&fs_info->extent_root->accounting_lock); + num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); + spin_unlock(&fs_info->extent_root->accounting_lock); - /* make sure bytes are sectorsize aligned */ - bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); + spin_lock(&fs_info->csum_root->accounting_lock); + num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); + spin_unlock(&fs_info->csum_root->accounting_lock); - data_sinfo = BTRFS_I(inode)->space_info; - if (!data_sinfo) - goto alloc; + spin_lock(&fs_info->tree_root->accounting_lock); + num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); + spin_unlock(&fs_info->tree_root->accounting_lock); +#endif + sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); + spin_lock(&sinfo->lock); + data_used = sinfo->bytes_used; + spin_unlock(&sinfo->lock); -again: - /* make sure we have enough space to handle the data first */ - spin_lock(&data_sinfo->lock); - used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc + - data_sinfo->bytes_reserved + data_sinfo->bytes_pinned + - data_sinfo->bytes_readonly + data_sinfo->bytes_may_use + - data_sinfo->bytes_super; + sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); + spin_lock(&sinfo->lock); + meta_used = sinfo->bytes_used; + spin_unlock(&sinfo->lock); - if (used + bytes > data_sinfo->total_bytes) { - struct btrfs_trans_handle *trans; + num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) * + csum_size * 2; + num_bytes += div64_u64(data_used + meta_used, 50); - if (!flushed) { - spin_unlock(&data_sinfo->lock); - flush_delalloc(root, data_sinfo); - flushed = 1; - goto again; - } + if (num_bytes * 3 > meta_used) + num_bytes = div64_u64(meta_used, 3); - /* - * if we don't have enough free bytes in this space then we need - * to alloc a new chunk. 
- */ - if (!data_sinfo->full) { - u64 alloc_target; + return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); +} - data_sinfo->force_alloc = 1; - spin_unlock(&data_sinfo->lock); -alloc: - alloc_target = btrfs_get_alloc_profile(root, 1); - trans = btrfs_start_transaction(root, 1); - if (!trans) - return -ENOMEM; +static void update_global_block_rsv(struct btrfs_fs_info *fs_info) +{ + struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; + struct btrfs_space_info *sinfo = block_rsv->space_info; + u64 num_bytes; - ret = do_chunk_alloc(trans, root->fs_info->extent_root, - bytes + 2 * 1024 * 1024, - alloc_target, 0); - btrfs_end_transaction(trans, root); - if (ret) - return ret; + num_bytes = calc_global_metadata_size(fs_info); - if (!data_sinfo) { - btrfs_set_inode_space_info(root, inode); - data_sinfo = BTRFS_I(inode)->space_info; - } - goto again; - } - spin_unlock(&data_sinfo->lock); + spin_lock(&block_rsv->lock); + spin_lock(&sinfo->lock); - /* commit the current transaction and try again */ - if (!committed && !root->fs_info->open_ioctl_trans) { - committed = 1; - trans = btrfs_join_transaction(root, 1); - if (!trans) - return -ENOMEM; - ret = btrfs_commit_transaction(trans, root); - if (ret) - return ret; - goto again; - } + block_rsv->size = num_bytes; - printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes" - ", %llu bytes_used, %llu bytes_reserved, " - "%llu bytes_pinned, %llu bytes_readonly, %llu may use " - "%llu total\n", (unsigned long long)bytes, - (unsigned long long)data_sinfo->bytes_delalloc, - (unsigned long long)data_sinfo->bytes_used, - (unsigned long long)data_sinfo->bytes_reserved, - (unsigned long long)data_sinfo->bytes_pinned, - (unsigned long long)data_sinfo->bytes_readonly, - (unsigned long long)data_sinfo->bytes_may_use, - (unsigned long long)data_sinfo->total_bytes); - return -ENOSPC; + num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + + sinfo->bytes_reserved + sinfo->bytes_readonly; + + if (sinfo->total_bytes > num_bytes) { + num_bytes = sinfo->total_bytes - num_bytes; + block_rsv->reserved += num_bytes; + sinfo->bytes_reserved += num_bytes; } - data_sinfo->bytes_may_use += bytes; - BTRFS_I(inode)->reserved_bytes += bytes; - spin_unlock(&data_sinfo->lock); - return 0; + if (block_rsv->reserved >= block_rsv->size) { + num_bytes = block_rsv->reserved - block_rsv->size; + sinfo->bytes_reserved -= num_bytes; + block_rsv->reserved = block_rsv->size; + block_rsv->full = 1; + } +#if 0 + printk(KERN_INFO"global block rsv size %llu reserved %llu\n", + block_rsv->size, block_rsv->reserved); +#endif + spin_unlock(&sinfo->lock); + spin_unlock(&block_rsv->lock); } -/* - * if there was an error for whatever reason after calling - * btrfs_check_data_free_space, call this so we can cleanup the counters. 
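[Editor's sketch, not part of the patch] The sizing heuristic in calc_global_metadata_size() above can be tried out in isolation: room for two copies of the data checksums, plus about 2% of everything already used, capped at a third of the metadata in use and rounded up to a multiple of leafsize << 10. The parameters below (4K blocks, 4-byte crc32c checksums, 4K leaves) are example values; only the arithmetic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

static uint64_t model_global_rsv_size(uint64_t data_used, uint64_t meta_used,
				      unsigned int blocksize_bits,
				      unsigned int csum_size, uint64_t leafsize)
{
	/* space to write checksums for every data block, twice over */
	uint64_t num_bytes = (data_used >> blocksize_bits) * csum_size * 2;

	/* plus roughly 2% of everything that is already in use */
	num_bytes += (data_used + meta_used) / 50;

	/* but never more than a third of the metadata already used */
	if (num_bytes * 3 > meta_used)
		num_bytes = meta_used / 3;

	return ALIGN_UP(num_bytes, leafsize << 10);
}

int main(void)
{
	uint64_t gb = 1024ULL * 1024 * 1024;

	printf("%llu\n", (unsigned long long)
	       model_global_rsv_size(100 * gb, 2 * gb, 12, 4, 4096));
	return 0;
}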
- */ -void btrfs_free_reserved_data_space(struct btrfs_root *root, - struct inode *inode, u64 bytes) +static void init_global_block_rsv(struct btrfs_fs_info *fs_info) { - struct btrfs_space_info *data_sinfo; + struct btrfs_space_info *space_info; - /* make sure bytes are sectorsize aligned */ - bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); + space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); + fs_info->chunk_block_rsv.space_info = space_info; + fs_info->chunk_block_rsv.priority = 10; - data_sinfo = BTRFS_I(inode)->space_info; - spin_lock(&data_sinfo->lock); - data_sinfo->bytes_may_use -= bytes; - BTRFS_I(inode)->reserved_bytes -= bytes; - spin_unlock(&data_sinfo->lock); + space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); + fs_info->global_block_rsv.space_info = space_info; + fs_info->global_block_rsv.priority = 10; + fs_info->global_block_rsv.refill_used = 1; + fs_info->delalloc_block_rsv.space_info = space_info; + fs_info->trans_block_rsv.space_info = space_info; + fs_info->empty_block_rsv.space_info = space_info; + fs_info->empty_block_rsv.priority = 10; + + fs_info->extent_root->block_rsv = &fs_info->global_block_rsv; + fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; + fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; + fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; + fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; + + btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv); + + btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv); + + update_global_block_rsv(fs_info); } -/* called when we are adding a delalloc extent to the inode's io_tree */ -void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode, - u64 bytes) +static void release_global_block_rsv(struct btrfs_fs_info *fs_info) { - struct btrfs_space_info *data_sinfo; + block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1); + WARN_ON(fs_info->delalloc_block_rsv.size > 0); + WARN_ON(fs_info->delalloc_block_rsv.reserved > 0); + WARN_ON(fs_info->trans_block_rsv.size > 0); + WARN_ON(fs_info->trans_block_rsv.reserved > 0); + WARN_ON(fs_info->chunk_block_rsv.size > 0); + WARN_ON(fs_info->chunk_block_rsv.reserved > 0); +} - /* get the space info for where this inode will be storing its data */ - data_sinfo = BTRFS_I(inode)->space_info; +static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) +{ + return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * + 3 * num_items; +} - /* make sure we have enough space to handle the data first */ - spin_lock(&data_sinfo->lock); - data_sinfo->bytes_delalloc += bytes; +int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + int num_items, int *retries) +{ + u64 num_bytes; + int ret; - /* - * we are adding a delalloc extent without calling - * btrfs_check_data_free_space first. 
This happens on a weird - * writepage condition, but shouldn't hurt our accounting - */ - if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) { - data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes; - BTRFS_I(inode)->reserved_bytes = 0; - } else { - data_sinfo->bytes_may_use -= bytes; - BTRFS_I(inode)->reserved_bytes -= bytes; - } + if (num_items == 0 || root->fs_info->chunk_root == root) + return 0; - spin_unlock(&data_sinfo->lock); + num_bytes = calc_trans_metadata_size(root, num_items); + ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, + num_bytes, retries); + if (!ret) { + trans->bytes_reserved += num_bytes; + trans->block_rsv = &root->fs_info->trans_block_rsv; + } + return ret; } -/* called when we are clearing an delalloc extent from the inode's io_tree */ -void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode, - u64 bytes) +void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { - struct btrfs_space_info *info; + if (!trans->bytes_reserved) + return; - info = BTRFS_I(inode)->space_info; + BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv); + btrfs_block_rsv_release(root, trans->block_rsv, + trans->bytes_reserved); + trans->bytes_reserved = 0; +} - spin_lock(&info->lock); - info->bytes_delalloc -= bytes; - spin_unlock(&info->lock); +int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); + struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; + + /* + * one for deleting orphan item, one for updating inode and + * two for calling btrfs_truncate_inode_items. + * + * btrfs_truncate_inode_items is a delete operation, it frees + * more space than it uses in most cases. So two units of + * metadata space should be enough for calling it many times. + * If all of the metadata space is used, we can commit + * transaction and use space it freed. + */ + u64 num_bytes = calc_trans_metadata_size(root, 4); + return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } -static void force_metadata_allocation(struct btrfs_fs_info *info) +void btrfs_orphan_release_metadata(struct inode *inode) { - struct list_head *head = &info->space_info; - struct btrfs_space_info *found; + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 num_bytes = calc_trans_metadata_size(root, 4); + btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); +} - rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) { - if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - found->force_alloc = 1; - } - rcu_read_unlock(); +int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending) +{ + struct btrfs_root *root = pending->root; + struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root); + struct btrfs_block_rsv *dst_rsv = &pending->block_rsv; + /* + * two for root back/forward refs, two for directory entries + * and one for root of the snapshot. 
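[Editor's sketch, not part of the patch] calc_trans_metadata_size() above prices one modified item as up to three copies of a worst-case tree path: one leaf plus one node for each remaining level. A standalone version follows, with MODEL_MAX_LEVEL standing in for BTRFS_MAX_LEVEL (8) and 4K leaf/node sizes as example values; the orphan helpers above reserve 4 such items, snapshot creation 5.

#include <stdint.h>
#include <stdio.h>

#define MODEL_MAX_LEVEL 8	/* stands in for BTRFS_MAX_LEVEL */

static uint64_t model_trans_metadata_size(uint64_t leafsize, uint64_t nodesize,
					  int num_items)
{
	return (leafsize + nodesize * (MODEL_MAX_LEVEL - 1)) * 3 * num_items;
}

int main(void)
{
	printf("orphan (4 items):   %llu bytes\n",
	       (unsigned long long)model_trans_metadata_size(4096, 4096, 4));
	printf("snapshot (5 items): %llu bytes\n",
	       (unsigned long long)model_trans_metadata_size(4096, 4096, 5));
	return 0;
}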
+ */ + u64 num_bytes = calc_trans_metadata_size(root, 5); + dst_rsv->space_info = src_rsv->space_info; + return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } -static int do_chunk_alloc(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, u64 alloc_bytes, - u64 flags, int force) +static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes) { - struct btrfs_space_info *space_info; - struct btrfs_fs_info *fs_info = extent_root->fs_info; - u64 thresh; - int ret = 0; + return num_bytes >>= 3; +} - mutex_lock(&fs_info->chunk_mutex); +int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv; + u64 to_reserve; + int nr_extents; + int retries = 0; + int ret; - flags = btrfs_reduce_alloc_profile(extent_root, flags); + if (btrfs_transaction_in_commit(root->fs_info)) + schedule_timeout(1); - space_info = __find_space_info(extent_root->fs_info, flags); - if (!space_info) { - ret = update_space_info(extent_root->fs_info, flags, - 0, 0, &space_info); - BUG_ON(ret); + num_bytes = ALIGN(num_bytes, root->sectorsize); +again: + spin_lock(&BTRFS_I(inode)->accounting_lock); + nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1; + if (nr_extents > BTRFS_I(inode)->reserved_extents) { + nr_extents -= BTRFS_I(inode)->reserved_extents; + to_reserve = calc_trans_metadata_size(root, nr_extents); + } else { + nr_extents = 0; + to_reserve = 0; } - BUG_ON(!space_info); - spin_lock(&space_info->lock); - if (space_info->force_alloc) - force = 1; - if (space_info->full) { - spin_unlock(&space_info->lock); - goto out; + to_reserve += calc_csum_metadata_size(inode, num_bytes); + ret = reserve_metadata_bytes(block_rsv, to_reserve); + if (ret) { + spin_unlock(&BTRFS_I(inode)->accounting_lock); + ret = should_retry_reserve(NULL, root, block_rsv, to_reserve, + &retries); + if (ret > 0) + goto again; + return ret; } - thresh = space_info->total_bytes - space_info->bytes_readonly; - thresh = div_factor(thresh, 8); - if (!force && - (space_info->bytes_used + space_info->bytes_pinned + - space_info->bytes_reserved + alloc_bytes) < thresh) { - spin_unlock(&space_info->lock); - goto out; - } - spin_unlock(&space_info->lock); + BTRFS_I(inode)->reserved_extents += nr_extents; + atomic_inc(&BTRFS_I(inode)->outstanding_extents); + spin_unlock(&BTRFS_I(inode)->accounting_lock); - /* - * if we're doing a data chunk, go ahead and make sure that - * we keep a reasonable number of metadata chunks allocated in the - * FS as well. 
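[Editor's sketch, not part of the patch] btrfs_delalloc_reserve_metadata() above charges for any outstanding delalloc extents that are not yet covered by a reservation, plus roughly one eighth of the data size for checksums (calc_csum_metadata_size()). Below is a simplified, lock-free model of that bookkeeping; per_extent_cost stands in for calc_trans_metadata_size(root, 1), and 98304 is the value the 4K example above would give.

#include <stdint.h>
#include <stdio.h>

struct inode_model {
	int outstanding_extents;	/* delalloc extents in the io tree */
	int reserved_extents;		/* extents already reserved for */
};

static uint64_t model_delalloc_reserve(struct inode_model *i, uint64_t num_bytes,
				       uint64_t per_extent_cost)
{
	uint64_t to_reserve = 0;
	int nr_extents = i->outstanding_extents + 1;

	if (nr_extents > i->reserved_extents) {
		nr_extents -= i->reserved_extents;
		to_reserve = per_extent_cost * nr_extents;
		i->reserved_extents += nr_extents;
	}
	i->outstanding_extents++;

	return to_reserve + (num_bytes >> 3);	/* checksum space */
}

int main(void)
{
	struct inode_model inode = { 0, 0 };

	printf("%llu\n", (unsigned long long)
	       model_delalloc_reserve(&inode, 1 << 20, 98304));
	return 0;
}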
- */ - if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { - fs_info->data_chunk_allocations++; - if (!(fs_info->data_chunk_allocations % - fs_info->metadata_ratio)) - force_metadata_allocation(fs_info); + block_rsv_add_bytes(block_rsv, to_reserve, 1); + + if (block_rsv->size > 512 * 1024 * 1024) + shrink_delalloc(NULL, root, to_reserve); + + return 0; +} + +void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 to_free; + int nr_extents; + + num_bytes = ALIGN(num_bytes, root->sectorsize); + atomic_dec(&BTRFS_I(inode)->outstanding_extents); + + spin_lock(&BTRFS_I(inode)->accounting_lock); + nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); + if (nr_extents < BTRFS_I(inode)->reserved_extents) { + nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents; + BTRFS_I(inode)->reserved_extents -= nr_extents; + } else { + nr_extents = 0; } + spin_unlock(&BTRFS_I(inode)->accounting_lock); - ret = btrfs_alloc_chunk(trans, extent_root, flags); - spin_lock(&space_info->lock); + to_free = calc_csum_metadata_size(inode, num_bytes); + if (nr_extents > 0) + to_free += calc_trans_metadata_size(root, nr_extents); + + btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, + to_free); +} + +int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes) +{ + int ret; + + ret = btrfs_check_data_free_space(inode, num_bytes); if (ret) - space_info->full = 1; - space_info->force_alloc = 0; - spin_unlock(&space_info->lock); -out: - mutex_unlock(&extent_root->fs_info->chunk_mutex); - return ret; + return ret; + + ret = btrfs_delalloc_reserve_metadata(inode, num_bytes); + if (ret) { + btrfs_free_reserved_data_space(inode, num_bytes); + return ret; + } + + return 0; +} + +void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) +{ + btrfs_delalloc_release_metadata(inode, num_bytes); + btrfs_free_reserved_data_space(inode, num_bytes); } static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc, - int mark_free) + u64 bytenr, u64 num_bytes, int alloc) { struct btrfs_block_group_cache *cache; struct btrfs_fs_info *info = root->fs_info; + int factor; u64 total = num_bytes; u64 old_val; u64 byte_in_group; @@ -3486,6 +3798,12 @@ static int update_block_group(struct btrfs_trans_handle *trans, cache = btrfs_lookup_block_group(info, bytenr); if (!cache) return -1; + if (cache->flags & (BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10)) + factor = 2; + else + factor = 1; byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -3498,31 +3816,24 @@ static int update_block_group(struct btrfs_trans_handle *trans, old_val += num_bytes; btrfs_set_block_group_used(&cache->item, old_val); cache->reserved -= num_bytes; - cache->space_info->bytes_used += num_bytes; cache->space_info->bytes_reserved -= num_bytes; - if (cache->ro) - cache->space_info->bytes_readonly -= num_bytes; + cache->space_info->bytes_used += num_bytes; + cache->space_info->disk_used += num_bytes * factor; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); } else { old_val -= num_bytes; - cache->space_info->bytes_used -= num_bytes; - if (cache->ro) - cache->space_info->bytes_readonly += num_bytes; btrfs_set_block_group_used(&cache->item, old_val); + cache->pinned += num_bytes; + cache->space_info->bytes_pinned += num_bytes; + cache->space_info->bytes_used -= num_bytes; + 
cache->space_info->disk_used -= num_bytes * factor; spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - if (mark_free) { - int ret; - ret = btrfs_discard_extent(root, bytenr, - num_bytes); - WARN_ON(ret); - - ret = btrfs_add_free_space(cache, bytenr, - num_bytes); - WARN_ON(ret); - } + set_extent_dirty(info->pinned_extents, + bytenr, bytenr + num_bytes - 1, + GFP_NOFS | __GFP_NOFAIL); } btrfs_put_block_group(cache); total -= num_bytes; @@ -3546,18 +3857,10 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) return bytenr; } -/* - * this function must be called within transaction - */ -int btrfs_pin_extent(struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int reserved) +static int pin_down_extent(struct btrfs_root *root, + struct btrfs_block_group_cache *cache, + u64 bytenr, u64 num_bytes, int reserved) { - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_block_group_cache *cache; - - cache = btrfs_lookup_block_group(fs_info, bytenr); - BUG_ON(!cache); - spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned += num_bytes; @@ -3569,28 +3872,68 @@ int btrfs_pin_extent(struct btrfs_root *root, spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - btrfs_put_block_group(cache); + set_extent_dirty(root->fs_info->pinned_extents, bytenr, + bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL); + return 0; +} + +/* + * this function must be called within transaction + */ +int btrfs_pin_extent(struct btrfs_root *root, + u64 bytenr, u64 num_bytes, int reserved) +{ + struct btrfs_block_group_cache *cache; - set_extent_dirty(fs_info->pinned_extents, - bytenr, bytenr + num_bytes - 1, GFP_NOFS); + cache = btrfs_lookup_block_group(root->fs_info, bytenr); + BUG_ON(!cache); + + pin_down_extent(root, cache, bytenr, num_bytes, reserved); + + btrfs_put_block_group(cache); return 0; } -static int update_reserved_extents(struct btrfs_block_group_cache *cache, - u64 num_bytes, int reserve) +/* + * update size of reserved extents. this function may return -EAGAIN + * if 'reserve' is true or 'sinfo' is false. 
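[Editor's sketch, not part of the patch] The factor logic added to update_space_info() and update_block_group() above feeds the new disk_used counter: mirrored profiles (DUP, RAID1, RAID10) consume two raw bytes on disk for every logical byte. A toy illustration with placeholder flag bits:

#include <stdint.h>
#include <stdio.h>

#define BG_RAID1  (1ULL << 0)	/* placeholder flag values */
#define BG_DUP    (1ULL << 1)
#define BG_RAID10 (1ULL << 2)

static int raid_factor(uint64_t flags)
{
	return (flags & (BG_DUP | BG_RAID1 | BG_RAID10)) ? 2 : 1;
}

int main(void)
{
	uint64_t bytes_used = 0, disk_used = 0;
	uint64_t alloc = 128 * 1024;		/* one 128K logical allocation */
	uint64_t flags = BG_RAID1;

	bytes_used += alloc;
	disk_used += alloc * raid_factor(flags);
	printf("logical %llu, on disk %llu\n",
	       (unsigned long long)bytes_used, (unsigned long long)disk_used);
	return 0;
}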
+ */ +static int update_reserved_bytes(struct btrfs_block_group_cache *cache, + u64 num_bytes, int reserve, int sinfo) { - spin_lock(&cache->space_info->lock); - spin_lock(&cache->lock); - if (reserve) { - cache->reserved += num_bytes; - cache->space_info->bytes_reserved += num_bytes; + int ret = 0; + if (sinfo) { + struct btrfs_space_info *space_info = cache->space_info; + spin_lock(&space_info->lock); + spin_lock(&cache->lock); + if (reserve) { + if (cache->ro) { + ret = -EAGAIN; + } else { + cache->reserved += num_bytes; + space_info->bytes_reserved += num_bytes; + } + } else { + if (cache->ro) + space_info->bytes_readonly += num_bytes; + cache->reserved -= num_bytes; + space_info->bytes_reserved -= num_bytes; + } + spin_unlock(&cache->lock); + spin_unlock(&space_info->lock); } else { - cache->reserved -= num_bytes; - cache->space_info->bytes_reserved -= num_bytes; + spin_lock(&cache->lock); + if (cache->ro) { + ret = -EAGAIN; + } else { + if (reserve) + cache->reserved += num_bytes; + else + cache->reserved -= num_bytes; + } + spin_unlock(&cache->lock); } - spin_unlock(&cache->lock); - spin_unlock(&cache->space_info->lock); - return 0; + return ret; } int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, @@ -3621,6 +3964,8 @@ int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, fs_info->pinned_extents = &fs_info->freed_extents[0]; up_write(&fs_info->extent_commit_sem); + + update_global_block_rsv(fs_info); return 0; } @@ -3647,14 +3992,21 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) btrfs_add_free_space(cache, start, len); } + start += len; + spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned -= len; cache->space_info->bytes_pinned -= len; + if (cache->ro) { + cache->space_info->bytes_readonly += len; + } else if (cache->reserved_pinned > 0) { + len = min(len, cache->reserved_pinned); + cache->reserved_pinned -= len; + cache->space_info->bytes_reserved += len; + } spin_unlock(&cache->lock); spin_unlock(&cache->space_info->lock); - - start += len; } if (cache) @@ -3667,8 +4019,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_io_tree *unpin; + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *next_rsv; u64 start; u64 end; + int idx; int ret; if (fs_info->pinned_extents == &fs_info->freed_extents[0]) @@ -3689,59 +4044,30 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, cond_resched(); } - return ret; -} - -static int pin_down_bytes(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - u64 bytenr, u64 num_bytes, - int is_data, int reserved, - struct extent_buffer **must_clean) -{ - int err = 0; - struct extent_buffer *buf; - - if (is_data) - goto pinit; - - /* - * discard is sloooow, and so triggering discards on - * individual btree blocks isn't a good plan. Just - * pin everything in discard mode. 
- */ - if (btrfs_test_opt(root, DISCARD)) - goto pinit; + mutex_lock(&fs_info->durable_block_rsv_mutex); + list_for_each_entry_safe(block_rsv, next_rsv, + &fs_info->durable_block_rsv_list, list) { - buf = btrfs_find_tree_block(root, bytenr, num_bytes); - if (!buf) - goto pinit; + idx = trans->transid & 0x1; + if (block_rsv->freed[idx] > 0) { + block_rsv_add_bytes(block_rsv, + block_rsv->freed[idx], 0); + block_rsv->freed[idx] = 0; + } + if (atomic_read(&block_rsv->usage) == 0) { + btrfs_block_rsv_release(root, block_rsv, (u64)-1); - /* we can reuse a block if it hasn't been written - * and it is from this transaction. We can't - * reuse anything from the tree log root because - * it has tiny sub-transactions. - */ - if (btrfs_buffer_uptodate(buf, 0) && - btrfs_try_tree_lock(buf)) { - u64 header_owner = btrfs_header_owner(buf); - u64 header_transid = btrfs_header_generation(buf); - if (header_owner != BTRFS_TREE_LOG_OBJECTID && - header_transid == trans->transid && - !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { - *must_clean = buf; - return 1; + if (block_rsv->freed[0] == 0 && + block_rsv->freed[1] == 0) { + list_del_init(&block_rsv->list); + kfree(block_rsv); + } + } else { + btrfs_block_rsv_release(root, block_rsv, 0); } - btrfs_tree_unlock(buf); } - free_extent_buffer(buf); -pinit: - if (path) - btrfs_set_path_blocking(path); - /* unlocks the pinned mutex */ - btrfs_pin_extent(root, bytenr, num_bytes, reserved); + mutex_unlock(&fs_info->durable_block_rsv_mutex); - BUG_ON(err < 0); return 0; } @@ -3902,9 +4228,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); } } else { - int mark_free = 0; - struct extent_buffer *must_clean = NULL; - if (found_extent) { BUG_ON(is_data && refs_to_drop != extent_data_ref_count(root, path, iref)); @@ -3917,31 +4240,11 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, } } - ret = pin_down_bytes(trans, root, path, bytenr, - num_bytes, is_data, 0, &must_clean); - if (ret > 0) - mark_free = 1; - BUG_ON(ret < 0); - /* - * it is going to be very rare for someone to be waiting - * on the block we're freeing. del_items might need to - * schedule, so rather than get fancy, just force it - * to blocking here - */ - if (must_clean) - btrfs_set_lock_blocking(must_clean); - ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); BUG_ON(ret); btrfs_release_path(extent_root, path); - if (must_clean) { - clean_tree_block(NULL, root, must_clean); - btrfs_tree_unlock(must_clean); - free_extent_buffer(must_clean); - } - if (is_data) { ret = btrfs_del_csums(trans, root, bytenr, num_bytes); BUG_ON(ret); @@ -3951,8 +4254,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT); } - ret = update_block_group(trans, root, bytenr, num_bytes, 0, - mark_free); + ret = update_block_group(trans, root, bytenr, num_bytes, 0); BUG_ON(ret); } btrfs_free_path(path); @@ -3960,7 +4262,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, } /* - * when we free an extent, it is possible (and likely) that we free the last + * when we free an block, it is possible (and likely) that we free the last * delayed ref for that extent as well. This searches the delayed ref tree for * a given extent, and if there are no other delayed refs to be processed, it * removes it from the tree. 
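[Editor's sketch, not part of the patch] The freed[] handling in btrfs_finish_extent_commit() above, together with its producer side in btrfs_free_tree_block() further down, is a two-slot scheme indexed by transid & 0x1: space freed during transaction N is parked until that transaction commits, and only then flows back into the durable reservation. A minimal model:

#include <stdint.h>
#include <stdio.h>

struct durable_rsv_model {
	uint64_t reserved;
	uint64_t freed[2];
};

/* a tree block is dropped while transaction @transid is running */
static void model_free_block(struct durable_rsv_model *r, uint64_t transid,
			     uint64_t len)
{
	r->freed[transid & 1] += len;
}

/* mirrors the loop in btrfs_finish_extent_commit(): at commit time the
 * parked bytes become usable reservation again */
static void model_commit(struct durable_rsv_model *r, uint64_t transid)
{
	int idx = transid & 1;

	r->reserved += r->freed[idx];
	r->freed[idx] = 0;
}

int main(void)
{
	struct durable_rsv_model r = { 0, { 0, 0 } };

	model_free_block(&r, 42, 4096);
	model_commit(&r, 42);
	printf("reserved after commit: %llu\n", (unsigned long long)r.reserved);
	return 0;
}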
@@ -3972,7 +4274,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; struct rb_node *node; - int ret; + int ret = 0; delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); @@ -4024,17 +4326,99 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, list_del_init(&head->cluster); spin_unlock(&delayed_refs->lock); - ret = run_one_delayed_ref(trans, root->fs_info->tree_root, - &head->node, head->extent_op, - head->must_insert_reserved); - BUG_ON(ret); + BUG_ON(head->extent_op); + if (head->must_insert_reserved) + ret = 1; + + mutex_unlock(&head->mutex); btrfs_put_delayed_ref(&head->node); - return 0; + return ret; out: spin_unlock(&delayed_refs->lock); return 0; } +void btrfs_free_tree_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct extent_buffer *buf, + u64 parent, int last_ref) +{ + struct btrfs_block_rsv *block_rsv; + struct btrfs_block_group_cache *cache = NULL; + int ret; + + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { + ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len, + parent, root->root_key.objectid, + btrfs_header_level(buf), + BTRFS_DROP_DELAYED_REF, NULL); + BUG_ON(ret); + } + + if (!last_ref) + return; + + block_rsv = get_block_rsv(trans, root); + cache = btrfs_lookup_block_group(root->fs_info, buf->start); + BUG_ON(block_rsv->space_info != cache->space_info); + + if (btrfs_header_generation(buf) == trans->transid) { + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { + ret = check_ref_cleanup(trans, root, buf->start); + if (!ret) + goto pin; + } + + if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { + pin_down_extent(root, cache, buf->start, buf->len, 1); + goto pin; + } + + WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); + + btrfs_add_free_space(cache, buf->start, buf->len); + ret = update_reserved_bytes(cache, buf->len, 0, 0); + if (ret == -EAGAIN) { + /* block group became read-only */ + update_reserved_bytes(cache, buf->len, 0, 1); + goto out; + } + + ret = 1; + spin_lock(&block_rsv->lock); + if (block_rsv->reserved < block_rsv->size) { + block_rsv->reserved += buf->len; + ret = 0; + } + spin_unlock(&block_rsv->lock); + + if (ret) { + spin_lock(&cache->space_info->lock); + cache->space_info->bytes_reserved -= buf->len; + spin_unlock(&cache->space_info->lock); + } + goto out; + } +pin: + if (block_rsv->durable && !cache->ro) { + ret = 0; + spin_lock(&cache->lock); + if (!cache->ro) { + cache->reserved_pinned += buf->len; + ret = 1; + } + spin_unlock(&cache->lock); + + if (ret) { + spin_lock(&block_rsv->lock); + block_rsv->freed[trans->transid & 0x1] += buf->len; + spin_unlock(&block_rsv->lock); + } + } +out: + btrfs_put_block_group(cache); +} + int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, @@ -4056,8 +4440,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, parent, root_objectid, (int)owner, BTRFS_DROP_DELAYED_REF, NULL); BUG_ON(ret); - ret = check_ref_cleanup(trans, root, bytenr); - BUG_ON(ret); } else { ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes, parent, root_objectid, owner, @@ -4067,21 +4449,6 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, return ret; } -int btrfs_free_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 bytenr, u32 blocksize, - u64 parent, u64 root_objectid, int level) -{ - u64 used; - 
spin_lock(&root->node_lock); - used = btrfs_root_used(&root->root_item) - blocksize; - btrfs_set_root_used(&root->root_item, used); - spin_unlock(&root->node_lock); - - return btrfs_free_extent(trans, root, bytenr, blocksize, - parent, root_objectid, level, 0); -} - static u64 stripe_align(struct btrfs_root *root, u64 val) { u64 mask = ((u64)root->stripesize - 1); @@ -4134,6 +4501,22 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache) return 0; } +static int get_block_group_index(struct btrfs_block_group_cache *cache) +{ + int index; + if (cache->flags & BTRFS_BLOCK_GROUP_RAID10) + index = 0; + else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1) + index = 1; + else if (cache->flags & BTRFS_BLOCK_GROUP_DUP) + index = 2; + else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0) + index = 3; + else + index = 4; + return index; +} + enum btrfs_loop_type { LOOP_FIND_IDEAL = 0, LOOP_CACHING_NOWAIT = 1, @@ -4155,7 +4538,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, u64 num_bytes, u64 empty_size, u64 search_start, u64 search_end, u64 hint_byte, struct btrfs_key *ins, - u64 exclude_start, u64 exclude_nr, int data) { int ret = 0; @@ -4168,6 +4550,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_space_info *space_info; int last_ptr_loop = 0; int loop = 0; + int index = 0; bool found_uncached_bg = false; bool failed_cluster_refill = false; bool failed_alloc = false; @@ -4237,6 +4620,7 @@ ideal_cache: btrfs_put_block_group(block_group); up_read(&space_info->groups_sem); } else { + index = get_block_group_index(block_group); goto have_block_group; } } else if (block_group) { @@ -4245,7 +4629,8 @@ ideal_cache: } search: down_read(&space_info->groups_sem); - list_for_each_entry(block_group, &space_info->block_groups, list) { + list_for_each_entry(block_group, &space_info->block_groups[index], + list) { u64 offset; int cached; @@ -4436,23 +4821,22 @@ checks: goto loop; } - if (exclude_nr > 0 && - (search_start + num_bytes > exclude_start && - search_start < exclude_start + exclude_nr)) { - search_start = exclude_start + exclude_nr; + ins->objectid = search_start; + ins->offset = num_bytes; + + if (offset < search_start) + btrfs_add_free_space(block_group, offset, + search_start - offset); + BUG_ON(offset > search_start); + ret = update_reserved_bytes(block_group, num_bytes, 1, + (data & BTRFS_BLOCK_GROUP_DATA)); + if (ret == -EAGAIN) { btrfs_add_free_space(block_group, offset, num_bytes); - /* - * if search_start is still in this block group - * then we just re-search this block group - */ - if (search_start >= block_group->key.objectid && - search_start < (block_group->key.objectid + - block_group->key.offset)) - goto have_block_group; goto loop; } + /* we are all good, lets return */ ins->objectid = search_start; ins->offset = num_bytes; @@ -4460,18 +4844,18 @@ checks: btrfs_add_free_space(block_group, offset, search_start - offset); BUG_ON(offset > search_start); - - update_reserved_extents(block_group, num_bytes, 1); - - /* we are all good, lets return */ break; loop: failed_cluster_refill = false; failed_alloc = false; + BUG_ON(index != get_block_group_index(block_group)); btrfs_put_block_group(block_group); } up_read(&space_info->groups_sem); + if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES) + goto search; + /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait for * for them to make caching progress. 
Also * determine the best possible bg to cache @@ -4485,6 +4869,7 @@ loop: if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && (found_uncached_bg || empty_size || empty_cluster || allowed_chunk_alloc)) { + index = 0; if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { found_uncached_bg = false; loop++; @@ -4567,31 +4952,30 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes, int dump_block_groups) { struct btrfs_block_group_cache *cache; + int index = 0; spin_lock(&info->lock); printk(KERN_INFO "space_info has %llu free, is %sfull\n", (unsigned long long)(info->total_bytes - info->bytes_used - info->bytes_pinned - info->bytes_reserved - - info->bytes_super), + info->bytes_readonly), (info->full) ? "" : "not "); - printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu," - " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu" - "\n", + printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, " + "reserved=%llu, may_use=%llu, readonly=%llu\n", (unsigned long long)info->total_bytes, + (unsigned long long)info->bytes_used, (unsigned long long)info->bytes_pinned, - (unsigned long long)info->bytes_delalloc, + (unsigned long long)info->bytes_reserved, (unsigned long long)info->bytes_may_use, - (unsigned long long)info->bytes_used, - (unsigned long long)info->bytes_root, - (unsigned long long)info->bytes_super, - (unsigned long long)info->bytes_reserved); + (unsigned long long)info->bytes_readonly); spin_unlock(&info->lock); if (!dump_block_groups) return; down_read(&info->groups_sem); - list_for_each_entry(cache, &info->block_groups, list) { +again: + list_for_each_entry(cache, &info->block_groups[index], list) { spin_lock(&cache->lock); printk(KERN_INFO "block group %llu has %llu bytes, %llu used " "%llu pinned %llu reserved\n", @@ -4603,6 +4987,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes, btrfs_dump_free_space(cache, bytes); spin_unlock(&cache->lock); } + if (++index < BTRFS_NR_RAID_TYPES) + goto again; up_read(&info->groups_sem); } @@ -4628,9 +5014,8 @@ again: WARN_ON(num_bytes < root->sectorsize); ret = find_free_extent(trans, root, num_bytes, empty_size, - search_start, search_end, hint_byte, ins, - trans->alloc_exclude_start, - trans->alloc_exclude_nr, data); + search_start, search_end, hint_byte, + ins, data); if (ret == -ENOSPC && num_bytes > min_alloc_size) { num_bytes = num_bytes >> 1; @@ -4668,7 +5053,7 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) ret = btrfs_discard_extent(root, start, len); btrfs_add_free_space(cache, start, len); - update_reserved_extents(cache, len, 0); + update_reserved_bytes(cache, len, 0, 1); btrfs_put_block_group(cache); return ret; @@ -4731,8 +5116,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_free_path(path); - ret = update_block_group(trans, root, ins->objectid, ins->offset, - 1, 0); + ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { printk(KERN_ERR "btrfs update block group failed for %llu " "%llu\n", (unsigned long long)ins->objectid, @@ -4792,8 +5176,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); btrfs_free_path(path); - ret = update_block_group(trans, root, ins->objectid, ins->offset, - 1, 0); + ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { printk(KERN_ERR "btrfs update block group failed for %llu " "%llu\n", (unsigned long 
long)ins->objectid, @@ -4869,73 +5252,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, put_caching_control(caching_ctl); } - update_reserved_extents(block_group, ins->offset, 1); + ret = update_reserved_bytes(block_group, ins->offset, 1, 1); + BUG_ON(ret); btrfs_put_block_group(block_group); ret = alloc_reserved_file_extent(trans, root, 0, root_objectid, 0, owner, offset, ins, 1); return ret; } -/* - * finds a free extent and does all the dirty work required for allocation - * returns the key for the extent through ins, and a tree buffer for - * the first block of the extent through buf. - * - * returns 0 if everything worked, non-zero otherwise. - */ -static int alloc_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 num_bytes, u64 parent, u64 root_objectid, - struct btrfs_disk_key *key, int level, - u64 empty_size, u64 hint_byte, u64 search_end, - struct btrfs_key *ins) -{ - int ret; - u64 flags = 0; - - ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes, - empty_size, hint_byte, search_end, - ins, 0); - if (ret) - return ret; - - if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { - if (parent == 0) - parent = ins->objectid; - flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; - } else - BUG_ON(parent > 0); - - if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { - struct btrfs_delayed_extent_op *extent_op; - extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); - BUG_ON(!extent_op); - if (key) - memcpy(&extent_op->key, key, sizeof(extent_op->key)); - else - memset(&extent_op->key, 0, sizeof(extent_op->key)); - extent_op->flags_to_set = flags; - extent_op->update_key = 1; - extent_op->update_flags = 1; - extent_op->is_data = 0; - - ret = btrfs_add_delayed_tree_ref(trans, ins->objectid, - ins->offset, parent, root_objectid, - level, BTRFS_ADD_DELAYED_EXTENT, - extent_op); - BUG_ON(ret); - } - - if (root_objectid == root->root_key.objectid) { - u64 used; - spin_lock(&root->node_lock); - used = btrfs_root_used(&root->root_item) + num_bytes; - btrfs_set_root_used(&root->root_item, used); - spin_unlock(&root->node_lock); - } - return ret; -} - struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u32 blocksize, @@ -4974,8 +5298,45 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, return buf; } +static struct btrfs_block_rsv * +use_block_rsv(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u32 blocksize) +{ + struct btrfs_block_rsv *block_rsv; + int ret; + + block_rsv = get_block_rsv(trans, root); + + if (block_rsv->size == 0) { + ret = reserve_metadata_bytes(block_rsv, blocksize); + if (ret) + return ERR_PTR(ret); + return block_rsv; + } + + ret = block_rsv_use_bytes(block_rsv, blocksize); + if (!ret) + return block_rsv; + + WARN_ON(1); + printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", + block_rsv->size, block_rsv->reserved, + block_rsv->freed[0], block_rsv->freed[1]); + + return ERR_PTR(-ENOSPC); +} + +static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize) +{ + block_rsv_add_bytes(block_rsv, blocksize, 0); + block_rsv_release_bytes(block_rsv, NULL, 0); +} + /* - * helper function to allocate a block for a given tree + * finds a free extent and does all the dirty work required for allocation + * returns the key for the extent through ins, and a tree buffer for + * the first block of the extent through buf. + * * returns the tree buffer or NULL. 
*/ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, @@ -4985,18 +5346,53 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, u64 hint, u64 empty_size) { struct btrfs_key ins; - int ret; + struct btrfs_block_rsv *block_rsv; struct extent_buffer *buf; + u64 flags = 0; + int ret; + - ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid, - key, level, empty_size, hint, (u64)-1, &ins); + block_rsv = use_block_rsv(trans, root, blocksize); + if (IS_ERR(block_rsv)) + return ERR_CAST(block_rsv); + + ret = btrfs_reserve_extent(trans, root, blocksize, blocksize, + empty_size, hint, (u64)-1, &ins, 0); if (ret) { - BUG_ON(ret > 0); + unuse_block_rsv(block_rsv, blocksize); return ERR_PTR(ret); } buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize, level); + BUG_ON(IS_ERR(buf)); + + if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { + if (parent == 0) + parent = ins.objectid; + flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; + } else + BUG_ON(parent > 0); + + if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { + struct btrfs_delayed_extent_op *extent_op; + extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); + BUG_ON(!extent_op); + if (key) + memcpy(&extent_op->key, key, sizeof(extent_op->key)); + else + memset(&extent_op->key, 0, sizeof(extent_op->key)); + extent_op->flags_to_set = flags; + extent_op->update_key = 1; + extent_op->update_flags = 1; + extent_op->is_data = 0; + + ret = btrfs_add_delayed_tree_ref(trans, ins.objectid, + ins.offset, parent, root_objectid, + level, BTRFS_ADD_DELAYED_EXTENT, + extent_op); + BUG_ON(ret); + } return buf; } @@ -5321,7 +5717,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, struct btrfs_path *path, struct walk_control *wc) { - int ret = 0; + int ret; int level = wc->level; struct extent_buffer *eb = path->nodes[level]; u64 parent = 0; @@ -5399,13 +5795,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, btrfs_header_owner(path->nodes[level + 1])); } - ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent, - root->root_key.objectid, level, 0); - BUG_ON(ret); + btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1); out: wc->refs[level] = 0; wc->flags[level] = 0; - return ret; + return 0; } static noinline int walk_down_tree(struct btrfs_trans_handle *trans, @@ -5483,7 +5877,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans, * also make sure backrefs for the shared block and all lower level * blocks are properly updated. 
*/ -int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) +int btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref) { struct btrfs_path *path; struct btrfs_trans_handle *trans; @@ -5501,7 +5896,9 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) wc = kzalloc(sizeof(*wc), GFP_NOFS); BUG_ON(!wc); - trans = btrfs_start_transaction(tree_root, 1); + trans = btrfs_start_transaction(tree_root, 0); + if (block_rsv) + trans->block_rsv = block_rsv; if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) { level = btrfs_header_level(root->node); @@ -5589,22 +5986,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) } BUG_ON(wc->level == 0); - if (trans->transaction->in_commit || - trans->transaction->delayed_refs.flushing) { + if (btrfs_should_end_transaction(trans, tree_root)) { ret = btrfs_update_root(trans, tree_root, &root->root_key, root_item); BUG_ON(ret); - btrfs_end_transaction(trans, tree_root); - trans = btrfs_start_transaction(tree_root, 1); - } else { - unsigned long update; - update = trans->delayed_ref_updates; - trans->delayed_ref_updates = 0; - if (update) - btrfs_run_delayed_refs(trans, tree_root, - update); + btrfs_end_transaction_throttle(trans, tree_root); + trans = btrfs_start_transaction(tree_root, 0); + if (block_rsv) + trans->block_rsv = block_rsv; } } btrfs_release_path(root, path); @@ -5632,7 +6023,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref) kfree(root); } out: - btrfs_end_transaction(trans, tree_root); + btrfs_end_transaction_throttle(trans, tree_root); kfree(wc); btrfs_free_path(path); return err; @@ -7228,48 +7619,80 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) return flags; } -static int __alloc_chunk_for_shrink(struct btrfs_root *root, - struct btrfs_block_group_cache *shrink_block_group, - int force) +static int set_block_group_ro(struct btrfs_block_group_cache *cache) { - struct btrfs_trans_handle *trans; - u64 new_alloc_flags; - u64 calc; + struct btrfs_space_info *sinfo = cache->space_info; + u64 num_bytes; + int ret = -ENOSPC; - spin_lock(&shrink_block_group->lock); - if (btrfs_block_group_used(&shrink_block_group->item) + - shrink_block_group->reserved > 0) { - spin_unlock(&shrink_block_group->lock); + if (cache->ro) + return 0; - trans = btrfs_start_transaction(root, 1); - spin_lock(&shrink_block_group->lock); + spin_lock(&sinfo->lock); + spin_lock(&cache->lock); + num_bytes = cache->key.offset - cache->reserved - cache->pinned - + cache->bytes_super - btrfs_block_group_used(&cache->item); + + if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + + sinfo->bytes_may_use + sinfo->bytes_readonly + + cache->reserved_pinned + num_bytes < sinfo->total_bytes) { + sinfo->bytes_readonly += num_bytes; + sinfo->bytes_reserved += cache->reserved_pinned; + cache->reserved_pinned = 0; + cache->ro = 1; + ret = 0; + } + spin_unlock(&cache->lock); + spin_unlock(&sinfo->lock); + return ret; +} - new_alloc_flags = update_block_group_flags(root, - shrink_block_group->flags); - if (new_alloc_flags != shrink_block_group->flags) { - calc = - btrfs_block_group_used(&shrink_block_group->item); - } else { - calc = shrink_block_group->key.offset; - } - spin_unlock(&shrink_block_group->lock); +int btrfs_set_block_group_ro(struct btrfs_root *root, + struct btrfs_block_group_cache *cache) - do_chunk_alloc(trans, root->fs_info->extent_root, - calc + 2 * 1024 * 1024, new_alloc_flags, force); +{ + struct btrfs_trans_handle 
*trans; + u64 alloc_flags; + int ret; - btrfs_end_transaction(trans, root); - } else - spin_unlock(&shrink_block_group->lock); - return 0; -} + BUG_ON(cache->ro); + trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); -int btrfs_prepare_block_group_relocation(struct btrfs_root *root, - struct btrfs_block_group_cache *group) + alloc_flags = update_block_group_flags(root, cache->flags); + if (alloc_flags != cache->flags) + do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + + ret = set_block_group_ro(cache); + if (!ret) + goto out; + alloc_flags = get_alloc_profile(root, cache->space_info->flags); + ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + if (ret < 0) + goto out; + ret = set_block_group_ro(cache); +out: + btrfs_end_transaction(trans, root); + return ret; +} +int btrfs_set_block_group_rw(struct btrfs_root *root, + struct btrfs_block_group_cache *cache) { - __alloc_chunk_for_shrink(root, group, 1); - set_block_group_readonly(group); + struct btrfs_space_info *sinfo = cache->space_info; + u64 num_bytes; + + BUG_ON(!cache->ro); + + spin_lock(&sinfo->lock); + spin_lock(&cache->lock); + num_bytes = cache->key.offset - cache->reserved - cache->pinned - + cache->bytes_super - btrfs_block_group_used(&cache->item); + sinfo->bytes_readonly -= num_bytes; + cache->ro = 0; + spin_unlock(&cache->lock); + spin_unlock(&sinfo->lock); return 0; } @@ -7436,17 +7859,33 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) */ synchronize_rcu(); + release_global_block_rsv(info); + while(!list_empty(&info->space_info)) { space_info = list_entry(info->space_info.next, struct btrfs_space_info, list); - + if (space_info->bytes_pinned > 0 || + space_info->bytes_reserved > 0) { + WARN_ON(1); + dump_space_info(space_info, 0, 0); + } list_del(&space_info->list); kfree(space_info); } return 0; } +static void __link_block_group(struct btrfs_space_info *space_info, + struct btrfs_block_group_cache *cache) +{ + int index = get_block_group_index(cache); + + down_write(&space_info->groups_sem); + list_add_tail(&cache->list, &space_info->block_groups[index]); + up_write(&space_info->groups_sem); +} + int btrfs_read_block_groups(struct btrfs_root *root) { struct btrfs_path *path; @@ -7468,10 +7907,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) while (1) { ret = find_first_block_group(root, path, &key); - if (ret > 0) { - ret = 0; - goto error; - } + if (ret > 0) + break; if (ret != 0) goto error; @@ -7480,7 +7917,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache = kzalloc(sizeof(*cache), GFP_NOFS); if (!cache) { ret = -ENOMEM; - break; + goto error; } atomic_set(&cache->count, 1); @@ -7537,20 +7974,36 @@ int btrfs_read_block_groups(struct btrfs_root *root) BUG_ON(ret); cache->space_info = space_info; spin_lock(&cache->space_info->lock); - cache->space_info->bytes_super += cache->bytes_super; + cache->space_info->bytes_readonly += cache->bytes_super; spin_unlock(&cache->space_info->lock); - down_write(&space_info->groups_sem); - list_add_tail(&cache->list, &space_info->block_groups); - up_write(&space_info->groups_sem); + __link_block_group(space_info, cache); ret = btrfs_add_block_group_cache(root->fs_info, cache); BUG_ON(ret); set_avail_alloc_bits(root->fs_info, cache->flags); if (btrfs_chunk_readonly(root, cache->key.objectid)) - set_block_group_readonly(cache); + set_block_group_ro(cache); + } + + list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) { + if (!(get_alloc_profile(root, space_info->flags) & + 
(BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_DUP))) + continue; + /* + * avoid allocating from un-mirrored block group if there are + * mirrored block groups. + */ + list_for_each_entry(cache, &space_info->block_groups[3], list) + set_block_group_ro(cache); + list_for_each_entry(cache, &space_info->block_groups[4], list) + set_block_group_ro(cache); } + + init_global_block_rsv(info); ret = 0; error: btrfs_free_path(path); @@ -7611,12 +8064,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, BUG_ON(ret); spin_lock(&cache->space_info->lock); - cache->space_info->bytes_super += cache->bytes_super; + cache->space_info->bytes_readonly += cache->bytes_super; spin_unlock(&cache->space_info->lock); - down_write(&cache->space_info->groups_sem); - list_add_tail(&cache->list, &cache->space_info->block_groups); - up_write(&cache->space_info->groups_sem); + __link_block_group(cache->space_info, cache); ret = btrfs_add_block_group_cache(root->fs_info, cache); BUG_ON(ret); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d2d03684fab2..a4080c21ec55 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -135,7 +135,7 @@ static struct extent_state *alloc_extent_state(gfp_t mask) return state; } -static void free_extent_state(struct extent_state *state) +void free_extent_state(struct extent_state *state) { if (!state) return; @@ -335,21 +335,18 @@ static int merge_state(struct extent_io_tree *tree, } static int set_state_cb(struct extent_io_tree *tree, - struct extent_state *state, - unsigned long bits) + struct extent_state *state, int *bits) { if (tree->ops && tree->ops->set_bit_hook) { return tree->ops->set_bit_hook(tree->mapping->host, - state->start, state->end, - state->state, bits); + state, bits); } return 0; } static void clear_state_cb(struct extent_io_tree *tree, - struct extent_state *state, - unsigned long bits) + struct extent_state *state, int *bits) { if (tree->ops && tree->ops->clear_bit_hook) tree->ops->clear_bit_hook(tree->mapping->host, state, bits); @@ -367,9 +364,10 @@ static void clear_state_cb(struct extent_io_tree *tree, */ static int insert_state(struct extent_io_tree *tree, struct extent_state *state, u64 start, u64 end, - int bits) + int *bits) { struct rb_node *node; + int bits_to_set = *bits & ~EXTENT_CTLBITS; int ret; if (end < start) { @@ -384,9 +382,9 @@ static int insert_state(struct extent_io_tree *tree, if (ret) return ret; - if (bits & EXTENT_DIRTY) + if (bits_to_set & EXTENT_DIRTY) tree->dirty_bytes += end - start + 1; - state->state |= bits; + state->state |= bits_to_set; node = tree_insert(&tree->state, end, &state->rb_node); if (node) { struct extent_state *found; @@ -456,13 +454,13 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, * struct is freed and removed from the tree */ static int clear_state_bit(struct extent_io_tree *tree, - struct extent_state *state, int bits, int wake, - int delete) + struct extent_state *state, + int *bits, int wake) { - int bits_to_clear = bits & ~EXTENT_DO_ACCOUNTING; + int bits_to_clear = *bits & ~EXTENT_CTLBITS; int ret = state->state & bits_to_clear; - if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { + if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { u64 range = state->end - state->start + 1; WARN_ON(range > tree->dirty_bytes); tree->dirty_bytes -= range; @@ -471,9 +469,8 @@ static int clear_state_bit(struct extent_io_tree *tree, state->state &= ~bits_to_clear; if (wake) wake_up(&state->wq); - if 
(delete || state->state == 0) { + if (state->state == 0) { if (state->tree) { - clear_state_cb(tree, state, state->state); rb_erase(&state->rb_node, &tree->state); state->tree = NULL; free_extent_state(state); @@ -514,6 +511,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int set = 0; int clear = 0; + if (delete) + bits |= ~EXTENT_CTLBITS; + bits |= EXTENT_FIRST_DELALLOC; + if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) clear = 1; again: @@ -580,8 +581,7 @@ hit_next: if (err) goto out; if (state->end <= end) { - set |= clear_state_bit(tree, state, bits, wake, - delete); + set |= clear_state_bit(tree, state, &bits, wake); if (last_end == (u64)-1) goto out; start = last_end + 1; @@ -602,7 +602,7 @@ hit_next: if (wake) wake_up(&state->wq); - set |= clear_state_bit(tree, prealloc, bits, wake, delete); + set |= clear_state_bit(tree, prealloc, &bits, wake); prealloc = NULL; goto out; @@ -613,7 +613,7 @@ hit_next: else next_node = NULL; - set |= clear_state_bit(tree, state, bits, wake, delete); + set |= clear_state_bit(tree, state, &bits, wake); if (last_end == (u64)-1) goto out; start = last_end + 1; @@ -706,19 +706,19 @@ out: static int set_state_bits(struct extent_io_tree *tree, struct extent_state *state, - int bits) + int *bits) { int ret; + int bits_to_set = *bits & ~EXTENT_CTLBITS; ret = set_state_cb(tree, state, bits); if (ret) return ret; - - if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { + if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { u64 range = state->end - state->start + 1; tree->dirty_bytes += range; } - state->state |= bits; + state->state |= bits_to_set; return 0; } @@ -745,10 +745,9 @@ static void cache_state(struct extent_state *state, * [start, end] is inclusive This takes the tree lock. 
*/ -static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - int bits, int exclusive_bits, u64 *failed_start, - struct extent_state **cached_state, - gfp_t mask) +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int exclusive_bits, u64 *failed_start, + struct extent_state **cached_state, gfp_t mask) { struct extent_state *state; struct extent_state *prealloc = NULL; @@ -757,6 +756,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u64 last_start; u64 last_end; + bits |= EXTENT_FIRST_DELALLOC; again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); @@ -778,7 +778,7 @@ again: */ node = tree_search(tree, start); if (!node) { - err = insert_state(tree, prealloc, start, end, bits); + err = insert_state(tree, prealloc, start, end, &bits); prealloc = NULL; BUG_ON(err == -EEXIST); goto out; @@ -802,7 +802,7 @@ hit_next: goto out; } - err = set_state_bits(tree, state, bits); + err = set_state_bits(tree, state, &bits); if (err) goto out; @@ -852,7 +852,7 @@ hit_next: if (err) goto out; if (state->end <= end) { - err = set_state_bits(tree, state, bits); + err = set_state_bits(tree, state, &bits); if (err) goto out; cache_state(state, cached_state); @@ -877,7 +877,7 @@ hit_next: else this_end = last_start - 1; err = insert_state(tree, prealloc, start, this_end, - bits); + &bits); BUG_ON(err == -EEXIST); if (err) { prealloc = NULL; @@ -903,7 +903,7 @@ hit_next: err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); - err = set_state_bits(tree, prealloc, bits); + err = set_state_bits(tree, prealloc, &bits); if (err) { prealloc = NULL; goto out; @@ -966,8 +966,7 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, { return clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 0, 0, - NULL, mask); + EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask); } int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, @@ -1435,9 +1434,6 @@ int extent_clear_unlock_delalloc(struct inode *inode, if (op & EXTENT_CLEAR_DELALLOC) clear_bits |= EXTENT_DELALLOC; - if (op & EXTENT_CLEAR_ACCOUNTING) - clear_bits |= EXTENT_DO_ACCOUNTING; - clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS); if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK | @@ -1916,7 +1912,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, if (tree->ops && tree->ops->submit_bio_hook) tree->ops->submit_bio_hook(page->mapping->host, rw, bio, - mirror_num, bio_flags); + mirror_num, bio_flags, start); else submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) @@ -2020,6 +2016,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, sector_t sector; struct extent_map *em; struct block_device *bdev; + struct btrfs_ordered_extent *ordered; int ret; int nr = 0; size_t page_offset = 0; @@ -2031,7 +2028,15 @@ static int __extent_read_full_page(struct extent_io_tree *tree, set_page_extent_mapped(page); end = page_end; - lock_extent(tree, start, end, GFP_NOFS); + while (1) { + lock_extent(tree, start, end, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(inode, start); + if (!ordered) + break; + unlock_extent(tree, start, end, GFP_NOFS); + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + } if (page->index == last_byte >> PAGE_CACHE_SHIFT) { char *userpage; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 
bbab4813646f..5691c7b590da 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -16,7 +16,9 @@ #define EXTENT_BOUNDARY (1 << 9) #define EXTENT_NODATASUM (1 << 10) #define EXTENT_DO_ACCOUNTING (1 << 11) +#define EXTENT_FIRST_DELALLOC (1 << 12) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) +#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) /* flags for bio submission */ #define EXTENT_BIO_COMPRESSED 1 @@ -47,7 +49,7 @@ struct extent_state; typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, struct bio *bio, int mirror_num, - unsigned long bio_flags); + unsigned long bio_flags, u64 bio_offset); struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, @@ -69,10 +71,10 @@ struct extent_io_ops { struct extent_state *state); int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, - unsigned long old, unsigned long bits); + int (*set_bit_hook)(struct inode *inode, struct extent_state *state, + int *bits); int (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned long bits); + int *bits); int (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -176,6 +178,7 @@ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, unsigned long bits); +void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int filled, struct extent_state *cached_state); int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, @@ -185,6 +188,9 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask); +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int exclusive_bits, u64 *failed_start, + struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 54a255065aa3..a562a250ae77 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -149,13 +149,14 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, } -int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u32 *dst) +static int __btrfs_lookup_bio_sums(struct btrfs_root *root, + struct inode *inode, struct bio *bio, + u64 logical_offset, u32 *dst, int dio) { u32 sum; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; - u64 offset; + u64 offset = 0; u64 item_start_offset = 0; u64 item_last_offset = 0; u64 disk_bytenr; @@ -174,8 +175,11 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, WARN_ON(bio->bi_vcnt <= 0); disk_bytenr = (u64)bio->bi_sector << 9; + if (dio) + offset = logical_offset; while (bio_index < bio->bi_vcnt) { - offset = page_offset(bvec->bv_page) + bvec->bv_offset; + if (!dio) + offset = page_offset(bvec->bv_page) + bvec->bv_offset; ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) goto found; @@ -238,6 +242,7 @@ found: else set_state_private(io_tree, offset, sum); disk_bytenr += bvec->bv_len; + offset += bvec->bv_len; bio_index++; bvec++; } @@ -245,6 +250,18 @@ found: 
return 0; } +int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, + struct bio *bio, u32 *dst) +{ + return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0); +} + +int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, + struct bio *bio, u64 offset, u32 *dst) +{ + return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); +} + int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list) { @@ -657,6 +674,9 @@ again: goto found; } ret = PTR_ERR(item); + if (ret != -EFBIG && ret != -ENOENT) + goto fail_unlock; + if (ret == -EFBIG) { u32 item_size; /* we found one, but it isn't big enough yet */ diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 29ff749ff4ca..787b50a16a14 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -46,32 +46,42 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, struct page **prepared_pages, - const char __user *buf) + struct iov_iter *i) { - long page_fault = 0; - int i; + size_t copied; + int pg = 0; int offset = pos & (PAGE_CACHE_SIZE - 1); - for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) { + while (write_bytes > 0) { size_t count = min_t(size_t, PAGE_CACHE_SIZE - offset, write_bytes); - struct page *page = prepared_pages[i]; - fault_in_pages_readable(buf, count); + struct page *page = prepared_pages[pg]; +again: + if (unlikely(iov_iter_fault_in_readable(i, count))) + return -EFAULT; /* Copy data from userspace to the current page */ - kmap(page); - page_fault = __copy_from_user(page_address(page) + offset, - buf, count); + copied = iov_iter_copy_from_user(page, i, offset, count); + /* Flush processor's dcache for this page */ flush_dcache_page(page); - kunmap(page); - buf += count; - write_bytes -= count; + iov_iter_advance(i, copied); + write_bytes -= copied; - if (page_fault) - break; + if (unlikely(copied == 0)) { + count = min_t(size_t, PAGE_CACHE_SIZE - offset, + iov_iter_single_seg_count(i)); + goto again; + } + + if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { + offset += copied; + } else { + pg++; + offset = 0; + } } - return page_fault ? -EFAULT : 0; + return 0; } /* @@ -126,8 +136,7 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, end_of_last_block = start_pos + num_bytes - 1; err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, NULL); - if (err) - return err; + BUG_ON(err); for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; @@ -142,7 +151,7 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, * at this time. 
*/ } - return err; + return 0; } /* @@ -823,45 +832,46 @@ again: return 0; } -static ssize_t btrfs_file_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t btrfs_file_aio_write(struct kiocb *iocb, + const struct iovec *iov, + unsigned long nr_segs, loff_t pos) { - loff_t pos; + struct file *file = iocb->ki_filp; + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct page *pinned[2]; + struct page **pages = NULL; + struct iov_iter i; + loff_t *ppos = &iocb->ki_pos; loff_t start_pos; ssize_t num_written = 0; ssize_t err = 0; + size_t count; + size_t ocount; int ret = 0; - struct inode *inode = fdentry(file)->d_inode; - struct btrfs_root *root = BTRFS_I(inode)->root; - struct page **pages = NULL; int nrptrs; - struct page *pinned[2]; unsigned long first_index; unsigned long last_index; int will_write; + int buffered = 0; will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || (file->f_flags & O_DIRECT)); - nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE, - PAGE_CACHE_SIZE / (sizeof(struct page *))); pinned[0] = NULL; pinned[1] = NULL; - pos = *ppos; start_pos = pos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); - /* do the reserve before the mutex lock in case we have to do some - * flushing. We wouldn't deadlock, but this is more polite. - */ - err = btrfs_reserve_metadata_for_delalloc(root, inode, 1); - if (err) - goto out_nolock; - mutex_lock(&inode->i_mutex); + err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); + if (err) + goto out; + count = ocount; + current->backing_dev_info = inode->i_mapping->backing_dev_info; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) @@ -875,15 +885,53 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, goto out; file_update_time(file); + BTRFS_I(inode)->sequence++; + + if (unlikely(file->f_flags & O_DIRECT)) { + num_written = generic_file_direct_write(iocb, iov, &nr_segs, + pos, ppos, count, + ocount); + /* + * the generic O_DIRECT will update in-memory i_size after the + * DIOs are done. But our endio handlers that update the on + * disk i_size never update past the in memory i_size. So we + * need one more update here to catch any additions to the + * file + */ + if (inode->i_size != BTRFS_I(inode)->disk_i_size) { + btrfs_ordered_update_i_size(inode, inode->i_size, NULL); + mark_inode_dirty(inode); + } + if (num_written < 0) { + ret = num_written; + num_written = 0; + goto out; + } else if (num_written == count) { + /* pick up pos changes done by the generic code */ + pos = *ppos; + goto out; + } + /* + * We are going to do buffered for the rest of the range, so we + * need to make sure to invalidate the buffered pages when we're + * done. 
+ */ + buffered = 1; + pos += num_written; + } + + iov_iter_init(&i, iov, nr_segs, count, num_written); + nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / + (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); /* generic_write_checks can change our pos */ start_pos = pos; - BTRFS_I(inode)->sequence++; first_index = pos >> PAGE_CACHE_SHIFT; - last_index = (pos + count) >> PAGE_CACHE_SHIFT; + last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; /* * there are lots of better ways to do this, but this code @@ -900,7 +948,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, unlock_page(pinned[0]); } } - if ((pos + count) & (PAGE_CACHE_SIZE - 1)) { + if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) { pinned[1] = grab_cache_page(inode->i_mapping, last_index); if (!PageUptodate(pinned[1])) { ret = btrfs_readpage(NULL, pinned[1]); @@ -911,10 +959,10 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, } } - while (count > 0) { + while (iov_iter_count(&i) > 0) { size_t offset = pos & (PAGE_CACHE_SIZE - 1); - size_t write_bytes = min(count, nrptrs * - (size_t)PAGE_CACHE_SIZE - + size_t write_bytes = min(iov_iter_count(&i), + nrptrs * (size_t)PAGE_CACHE_SIZE - offset); size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; @@ -922,7 +970,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); - ret = btrfs_check_data_free_space(root, inode, write_bytes); + ret = btrfs_delalloc_reserve_space(inode, write_bytes); if (ret) goto out; @@ -930,26 +978,20 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, pos, first_index, last_index, write_bytes); if (ret) { - btrfs_free_reserved_data_space(root, inode, - write_bytes); + btrfs_delalloc_release_space(inode, write_bytes); goto out; } ret = btrfs_copy_from_user(pos, num_pages, - write_bytes, pages, buf); - if (ret) { - btrfs_free_reserved_data_space(root, inode, - write_bytes); - btrfs_drop_pages(pages, num_pages); - goto out; + write_bytes, pages, &i); + if (ret == 0) { + dirty_and_release_pages(NULL, root, file, pages, + num_pages, pos, write_bytes); } - ret = dirty_and_release_pages(NULL, root, file, pages, - num_pages, pos, write_bytes); btrfs_drop_pages(pages, num_pages); if (ret) { - btrfs_free_reserved_data_space(root, inode, - write_bytes); + btrfs_delalloc_release_space(inode, write_bytes); goto out; } @@ -965,8 +1007,6 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, btrfs_throttle(root); } - buf += write_bytes; - count -= write_bytes; pos += write_bytes; num_written += write_bytes; @@ -976,9 +1016,7 @@ out: mutex_unlock(&inode->i_mutex); if (ret) err = ret; - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); -out_nolock: kfree(pages); if (pinned[0]) page_cache_release(pinned[0]); @@ -1008,7 +1046,7 @@ out_nolock: num_written = err; if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); ret = btrfs_log_dentry_safe(trans, root, file->f_dentry); if (ret == 0) { @@ -1023,7 +1061,7 @@ out_nolock: btrfs_end_transaction(trans, root); } } - if (file->f_flags & O_DIRECT) { + if (file->f_flags & O_DIRECT && buffered) { invalidate_mapping_pages(inode->i_mapping, start_pos >> PAGE_CACHE_SHIFT, (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); @@ 
-1063,8 +1101,9 @@ int btrfs_release_file(struct inode *inode, struct file *filp) * important optimization for directories because holding the mutex prevents * new operations on the dir while we write to disk. */ -int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) +int btrfs_sync_file(struct file *file, int datasync) { + struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -1104,9 +1143,9 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) if (file && file->private_data) btrfs_ioctl_trans_end(file); - trans = btrfs_start_transaction(root, 1); - if (!trans) { - ret = -ENOMEM; + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); goto out; } @@ -1161,7 +1200,7 @@ const struct file_operations btrfs_file_operations = { .read = do_sync_read, .aio_read = generic_file_aio_read, .splice_read = generic_file_splice_read, - .write = btrfs_file_write, + .aio_write = btrfs_file_aio_write, .mmap = btrfs_file_mmap, .open = generic_file_open, .release = btrfs_release_file, diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 72ce3c173d6a..64f1150bb48d 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -49,6 +49,33 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name, return 0; } +struct btrfs_inode_ref * +btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + const char *name, int name_len, + u64 inode_objectid, u64 ref_objectid, int mod) +{ + struct btrfs_key key; + struct btrfs_inode_ref *ref; + int ins_len = mod < 0 ? -1 : 0; + int cow = mod != 0; + int ret; + + key.objectid = inode_objectid; + key.type = BTRFS_INODE_REF_KEY; + key.offset = ref_objectid; + + ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow); + if (ret < 0) + return ERR_PTR(ret); + if (ret > 0) + return NULL; + if (!find_name_in_backref(path, name, name_len, &ref)) + return NULL; + return ref; +} + int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d601629b85d1..fa6ccc1bfe2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -252,6 +252,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans, inline_len, compressed_size, compressed_pages); BUG_ON(ret); + btrfs_delalloc_release_metadata(inode, end + 1 - start); btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); return 0; } @@ -414,6 +415,7 @@ again: trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; /* lets try to make an inline extent */ if (ret || total_in < (actual_end - start)) { @@ -439,7 +441,6 @@ again: start, end, NULL, EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY | EXTENT_CLEAR_DELALLOC | - EXTENT_CLEAR_ACCOUNTING | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); btrfs_end_transaction(trans, root); @@ -697,6 +698,38 @@ retry: return 0; } +static u64 get_extent_allocation_hint(struct inode *inode, u64 start, + u64 num_bytes) +{ + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map *em; + u64 alloc_hint = 0; + + read_lock(&em_tree->lock); + em = search_extent_mapping(em_tree, start, num_bytes); + if (em) { + /* + * if block start isn't an actual block number then find the + * first block in 
this inode and use that as a hint. If that + * block is also bogus then just don't worry about it. + */ + if (em->block_start >= EXTENT_MAP_LAST_BYTE) { + free_extent_map(em); + em = search_extent_mapping(em_tree, 0, 0); + if (em && em->block_start < EXTENT_MAP_LAST_BYTE) + alloc_hint = em->block_start; + if (em) + free_extent_map(em); + } else { + alloc_hint = em->block_start; + free_extent_map(em); + } + } + read_unlock(&em_tree->lock); + + return alloc_hint; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. The basic idea is to @@ -734,6 +767,7 @@ static noinline int cow_file_range(struct inode *inode, trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; actual_end = min_t(u64, isize, end + 1); @@ -753,7 +787,6 @@ static noinline int cow_file_range(struct inode *inode, EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC | - EXTENT_CLEAR_ACCOUNTING | EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK); @@ -769,29 +802,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(disk_num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); - - read_lock(&BTRFS_I(inode)->extent_tree.lock); - em = search_extent_mapping(&BTRFS_I(inode)->extent_tree, - start, num_bytes); - if (em) { - /* - * if block start isn't an actual block number then find the - * first block in this inode and use that as a hint. If that - * block is also bogus then just don't worry about it. - */ - if (em->block_start >= EXTENT_MAP_LAST_BYTE) { - free_extent_map(em); - em = search_extent_mapping(em_tree, 0, 0); - if (em && em->block_start < EXTENT_MAP_LAST_BYTE) - alloc_hint = em->block_start; - if (em) - free_extent_map(em); - } else { - alloc_hint = em->block_start; - free_extent_map(em); - } - } - read_unlock(&BTRFS_I(inode)->extent_tree.lock); + alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); while (disk_num_bytes > 0) { @@ -1174,6 +1185,13 @@ out_check: num_bytes, num_bytes, type); BUG_ON(ret); + if (root->root_key.objectid == + BTRFS_DATA_RELOC_TREE_OBJECTID) { + ret = btrfs_reloc_clone_csums(inode, cur_offset, + num_bytes); + BUG_ON(ret); + } + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, cur_offset, cur_offset + num_bytes - 1, locked_page, EXTENT_CLEAR_UNLOCK_PAGE | @@ -1226,15 +1244,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, } static int btrfs_split_extent_hook(struct inode *inode, - struct extent_state *orig, u64 split) + struct extent_state *orig, u64 split) { + /* not delalloc, ignore it */ if (!(orig->state & EXTENT_DELALLOC)) return 0; - spin_lock(&BTRFS_I(inode)->accounting_lock); - BTRFS_I(inode)->outstanding_extents++; - spin_unlock(&BTRFS_I(inode)->accounting_lock); - + atomic_inc(&BTRFS_I(inode)->outstanding_extents); return 0; } @@ -1252,10 +1268,7 @@ static int btrfs_merge_extent_hook(struct inode *inode, if (!(other->state & EXTENT_DELALLOC)) return 0; - spin_lock(&BTRFS_I(inode)->accounting_lock); - BTRFS_I(inode)->outstanding_extents--; - spin_unlock(&BTRFS_I(inode)->accounting_lock); - + atomic_dec(&BTRFS_I(inode)->outstanding_extents); return 0; } @@ -1264,8 +1277,8 @@ static int btrfs_merge_extent_hook(struct inode *inode, * bytes in this file, and to maintain the list of inodes that * have pending delalloc work to be done. 
*/ -static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, - unsigned long old, unsigned long bits) +static int btrfs_set_bit_hook(struct inode *inode, + struct extent_state *state, int *bits) { /* @@ -1273,17 +1286,18 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, * but in this case, we are only testeing for the DELALLOC * bit, which is only set or cleared with irqs on */ - if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { + if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; + u64 len = state->end + 1 - state->start; - spin_lock(&BTRFS_I(inode)->accounting_lock); - BTRFS_I(inode)->outstanding_extents++; - spin_unlock(&BTRFS_I(inode)->accounting_lock); - btrfs_delalloc_reserve_space(root, inode, end - start + 1); + if (*bits & EXTENT_FIRST_DELALLOC) + *bits &= ~EXTENT_FIRST_DELALLOC; + else + atomic_inc(&BTRFS_I(inode)->outstanding_extents); spin_lock(&root->fs_info->delalloc_lock); - BTRFS_I(inode)->delalloc_bytes += end - start + 1; - root->fs_info->delalloc_bytes += end - start + 1; + BTRFS_I(inode)->delalloc_bytes += len; + root->fs_info->delalloc_bytes += len; if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { list_add_tail(&BTRFS_I(inode)->delalloc_inodes, &root->fs_info->delalloc_inodes); @@ -1297,45 +1311,32 @@ static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, * extent_io.c clear_bit_hook, see set_bit_hook for why */ static int btrfs_clear_bit_hook(struct inode *inode, - struct extent_state *state, unsigned long bits) + struct extent_state *state, int *bits) { /* * set_bit and clear bit hooks normally require _irqsave/restore * but in this case, we are only testeing for the DELALLOC * bit, which is only set or cleared with irqs on */ - if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { + if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; + u64 len = state->end + 1 - state->start; - if (bits & EXTENT_DO_ACCOUNTING) { - spin_lock(&BTRFS_I(inode)->accounting_lock); - WARN_ON(!BTRFS_I(inode)->outstanding_extents); - BTRFS_I(inode)->outstanding_extents--; - spin_unlock(&BTRFS_I(inode)->accounting_lock); - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); - } + if (*bits & EXTENT_FIRST_DELALLOC) + *bits &= ~EXTENT_FIRST_DELALLOC; + else if (!(*bits & EXTENT_DO_ACCOUNTING)) + atomic_dec(&BTRFS_I(inode)->outstanding_extents); + + if (*bits & EXTENT_DO_ACCOUNTING) + btrfs_delalloc_release_metadata(inode, len); + + if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) + btrfs_free_reserved_data_space(inode, len); spin_lock(&root->fs_info->delalloc_lock); - if (state->end - state->start + 1 > - root->fs_info->delalloc_bytes) { - printk(KERN_INFO "btrfs warning: delalloc account " - "%llu %llu\n", - (unsigned long long) - state->end - state->start + 1, - (unsigned long long) - root->fs_info->delalloc_bytes); - btrfs_delalloc_free_space(root, inode, (u64)-1); - root->fs_info->delalloc_bytes = 0; - BTRFS_I(inode)->delalloc_bytes = 0; - } else { - btrfs_delalloc_free_space(root, inode, - state->end - - state->start + 1); - root->fs_info->delalloc_bytes -= state->end - - state->start + 1; - BTRFS_I(inode)->delalloc_bytes -= state->end - - state->start + 1; - } + root->fs_info->delalloc_bytes -= len; + BTRFS_I(inode)->delalloc_bytes -= len; + if (BTRFS_I(inode)->delalloc_bytes == 0 && !list_empty(&BTRFS_I(inode)->delalloc_inodes)) { 
list_del_init(&BTRFS_I(inode)->delalloc_inodes); @@ -1384,7 +1385,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, */ static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, - unsigned long bio_flags) + unsigned long bio_flags, + u64 bio_offset) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -1403,7 +1405,8 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw, * are inserted into the btree */ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, unsigned long bio_flags, + u64 bio_offset) { struct btrfs_root *root = BTRFS_I(inode)->root; return btrfs_map_bio(root, rw, bio, mirror_num, 1); @@ -1414,7 +1417,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, * on write, or reading the csums from the tree before a read */ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) + int mirror_num, unsigned long bio_flags, + u64 bio_offset) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -1439,7 +1443,8 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, /* we're doing a write, do the async checksumming */ return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, - bio_flags, __btrfs_submit_bio_start, + bio_flags, bio_offset, + __btrfs_submit_bio_start, __btrfs_submit_bio_done); } @@ -1520,6 +1525,7 @@ again: goto again; } + BUG(); btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); ClearPageChecked(page); out: @@ -1650,7 +1656,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; + struct btrfs_trans_handle *trans = NULL; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; @@ -1668,9 +1674,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (!ret) { trans = btrfs_join_transaction(root, 1); + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); - btrfs_end_transaction(trans, root); } goto out; } @@ -1680,6 +1687,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 0, &cached_state, GFP_NOFS); trans = btrfs_join_transaction(root, 1); + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) compressed = 1; @@ -1711,12 +1720,13 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); - /* this also removes the ordered extent from the tree */ btrfs_ordered_update_i_size(inode, 0, ordered_extent); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); - btrfs_end_transaction(trans, root); out: + btrfs_delalloc_release_metadata(inode, ordered_extent->len); + if (trans) + btrfs_end_transaction(trans, root); /* once for us */ btrfs_put_ordered_extent(ordered_extent); /* once for the tree */ @@ -1838,7 +1848,7 @@ 
static int btrfs_io_failed_hook(struct bio *failed_bio, BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, failrec->last_mirror, - failrec->bio_flags); + failrec->bio_flags, 0); return 0; } @@ -1992,33 +2002,197 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root) up_read(&root->fs_info->cleanup_work_sem); } +/* + * calculate extra metadata reservation when snapshotting a subvolume + * contains orphan files. + */ +void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending, + u64 *bytes_to_reserve) +{ + struct btrfs_root *root; + struct btrfs_block_rsv *block_rsv; + u64 num_bytes; + int index; + + root = pending->root; + if (!root->orphan_block_rsv || list_empty(&root->orphan_list)) + return; + + block_rsv = root->orphan_block_rsv; + + /* orphan block reservation for the snapshot */ + num_bytes = block_rsv->size; + + /* + * after the snapshot is created, COWing tree blocks may use more + * space than it frees. So we should make sure there is enough + * reserved space. + */ + index = trans->transid & 0x1; + if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) { + num_bytes += block_rsv->size - + (block_rsv->reserved + block_rsv->freed[index]); + } + + *bytes_to_reserve += num_bytes; +} + +void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending) +{ + struct btrfs_root *root = pending->root; + struct btrfs_root *snap = pending->snap; + struct btrfs_block_rsv *block_rsv; + u64 num_bytes; + int index; + int ret; + + if (!root->orphan_block_rsv || list_empty(&root->orphan_list)) + return; + + /* refill source subvolume's orphan block reservation */ + block_rsv = root->orphan_block_rsv; + index = trans->transid & 0x1; + if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) { + num_bytes = block_rsv->size - + (block_rsv->reserved + block_rsv->freed[index]); + ret = btrfs_block_rsv_migrate(&pending->block_rsv, + root->orphan_block_rsv, + num_bytes); + BUG_ON(ret); + } + + /* setup orphan block reservation for the snapshot */ + block_rsv = btrfs_alloc_block_rsv(snap); + BUG_ON(!block_rsv); + + btrfs_add_durable_block_rsv(root->fs_info, block_rsv); + snap->orphan_block_rsv = block_rsv; + + num_bytes = root->orphan_block_rsv->size; + ret = btrfs_block_rsv_migrate(&pending->block_rsv, + block_rsv, num_bytes); + BUG_ON(ret); + +#if 0 + /* insert orphan item for the snapshot */ + WARN_ON(!root->orphan_item_inserted); + ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, + snap->root_key.objectid); + BUG_ON(ret); + snap->orphan_item_inserted = 1; +#endif +} + +enum btrfs_orphan_cleanup_state { + ORPHAN_CLEANUP_STARTED = 1, + ORPHAN_CLEANUP_DONE = 2, +}; + +/* + * This is called in transaction commmit time. If there are no orphan + * files in the subvolume, it removes orphan item and frees block_rsv + * structure. 
+ */ +void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + int ret; + + if (!list_empty(&root->orphan_list) || + root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) + return; + + if (root->orphan_item_inserted && + btrfs_root_refs(&root->root_item) > 0) { + ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root, + root->root_key.objectid); + BUG_ON(ret); + root->orphan_item_inserted = 0; + } + + if (root->orphan_block_rsv) { + WARN_ON(root->orphan_block_rsv->size > 0); + btrfs_free_block_rsv(root, root->orphan_block_rsv); + root->orphan_block_rsv = NULL; + } +} + /* * This creates an orphan entry for the given inode in case something goes * wrong in the middle of an unlink/truncate. + * + * NOTE: caller of this function should reserve 5 units of metadata for + * this function. */ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - int ret = 0; + struct btrfs_block_rsv *block_rsv = NULL; + int reserve = 0; + int insert = 0; + int ret; + + if (!root->orphan_block_rsv) { + block_rsv = btrfs_alloc_block_rsv(root); + BUG_ON(!block_rsv); + } - spin_lock(&root->list_lock); + spin_lock(&root->orphan_lock); + if (!root->orphan_block_rsv) { + root->orphan_block_rsv = block_rsv; + } else if (block_rsv) { + btrfs_free_block_rsv(root, block_rsv); + block_rsv = NULL; + } - /* already on the orphan list, we're good */ - if (!list_empty(&BTRFS_I(inode)->i_orphan)) { - spin_unlock(&root->list_lock); - return 0; + if (list_empty(&BTRFS_I(inode)->i_orphan)) { + list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); +#if 0 + /* + * For proper ENOSPC handling, we should do orphan + * cleanup when mounting. But this introduces backward + * compatibility issue. 
+ */ + if (!xchg(&root->orphan_item_inserted, 1)) + insert = 2; + else + insert = 1; +#endif + insert = 1; + } else { + WARN_ON(!BTRFS_I(inode)->orphan_meta_reserved); } - list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); + if (!BTRFS_I(inode)->orphan_meta_reserved) { + BTRFS_I(inode)->orphan_meta_reserved = 1; + reserve = 1; + } + spin_unlock(&root->orphan_lock); - spin_unlock(&root->list_lock); + if (block_rsv) + btrfs_add_durable_block_rsv(root->fs_info, block_rsv); - /* - * insert an orphan item to track this unlinked/truncated file - */ - ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); + /* grab metadata reservation from transaction handle */ + if (reserve) { + ret = btrfs_orphan_reserve_metadata(trans, inode); + BUG_ON(ret); + } - return ret; + /* insert an orphan item to track this unlinked/truncated file */ + if (insert >= 1) { + ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); + BUG_ON(ret); + } + + /* insert an orphan item to track subvolume contains orphan files */ + if (insert >= 2) { + ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root, + root->root_key.objectid); + BUG_ON(ret); + } + return 0; } /* @@ -2028,26 +2202,31 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; + int delete_item = 0; + int release_rsv = 0; int ret = 0; - spin_lock(&root->list_lock); - - if (list_empty(&BTRFS_I(inode)->i_orphan)) { - spin_unlock(&root->list_lock); - return 0; + spin_lock(&root->orphan_lock); + if (!list_empty(&BTRFS_I(inode)->i_orphan)) { + list_del_init(&BTRFS_I(inode)->i_orphan); + delete_item = 1; } - list_del_init(&BTRFS_I(inode)->i_orphan); - if (!trans) { - spin_unlock(&root->list_lock); - return 0; + if (BTRFS_I(inode)->orphan_meta_reserved) { + BTRFS_I(inode)->orphan_meta_reserved = 0; + release_rsv = 1; } + spin_unlock(&root->orphan_lock); - spin_unlock(&root->list_lock); + if (trans && delete_item) { + ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + BUG_ON(ret); + } - ret = btrfs_del_orphan_item(trans, root, inode->i_ino); + if (release_rsv) + btrfs_orphan_release_metadata(inode); - return ret; + return 0; } /* @@ -2064,7 +2243,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) struct inode *inode; int ret = 0, nr_unlink = 0, nr_truncate = 0; - if (!xchg(&root->clean_orphans, 0)) + if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) return; path = btrfs_alloc_path(); @@ -2117,16 +2296,15 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); - if (IS_ERR(inode)) - break; + BUG_ON(IS_ERR(inode)); /* * add this inode to the orphan list so btrfs_orphan_del does * the proper thing when we hit it */ - spin_lock(&root->list_lock); + spin_lock(&root->orphan_lock); list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); - spin_unlock(&root->list_lock); + spin_unlock(&root->orphan_lock); /* * if this is a bad inode, means we actually succeeded in @@ -2135,7 +2313,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * do a destroy_inode */ if (is_bad_inode(inode)) { - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); @@ -2153,13 +2331,23 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) /* this will do delete_inode and 
everything for us */ iput(inode); } + btrfs_free_path(path); + + root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; + + if (root->orphan_block_rsv) + btrfs_block_rsv_release(root, root->orphan_block_rsv, + (u64)-1); + + if (root->orphan_block_rsv || root->orphan_item_inserted) { + trans = btrfs_join_transaction(root, 1); + btrfs_end_transaction(trans, root); + } if (nr_unlink) printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink); if (nr_truncate) printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); - - btrfs_free_path(path); } /* @@ -2478,103 +2666,276 @@ out: return ret; } -static int btrfs_unlink(struct inode *dir, struct dentry *dentry) +/* helper to check if there is any shared block in the path */ +static int check_path_shared(struct btrfs_root *root, + struct btrfs_path *path) { - struct btrfs_root *root; - struct btrfs_trans_handle *trans; - struct inode *inode = dentry->d_inode; + struct extent_buffer *eb; + int level; int ret; - unsigned long nr = 0; - - root = BTRFS_I(dir)->root; - - /* - * 5 items for unlink inode - * 1 for orphan - */ - ret = btrfs_reserve_metadata_space(root, 6); - if (ret) - return ret; + u64 refs; - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) { - btrfs_unreserve_metadata_space(root, 6); - return PTR_ERR(trans); + for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + if (!path->nodes[level]) + break; + eb = path->nodes[level]; + if (!btrfs_block_can_be_shared(root, eb)) + continue; + ret = btrfs_lookup_extent_info(NULL, root, eb->start, eb->len, + &refs, NULL); + if (refs > 1) + return 1; } - - btrfs_set_trans_block_group(trans, dir); - - btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); - - ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, - dentry->d_name.name, dentry->d_name.len); - - if (inode->i_nlink == 0) - ret = btrfs_orphan_add(trans, inode); - - nr = trans->blocks_used; - - btrfs_end_transaction_throttle(trans, root); - btrfs_unreserve_metadata_space(root, 6); - btrfs_btree_balance_dirty(root, nr); - return ret; + return 0; } -int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *dir, u64 objectid, - const char *name, int name_len) +/* + * helper to start transaction for unlink and rmdir. + * + * unlink and rmdir are special in btrfs, they do not always free space. + * so in enospc case, we should make sure they will free space before + * allowing them to use the global metadata reservation. 
+ */ +static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, + struct dentry *dentry) { + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; - struct extent_buffer *leaf; + struct btrfs_inode_ref *ref; struct btrfs_dir_item *di; - struct btrfs_key key; + struct inode *inode = dentry->d_inode; u64 index; + int check_link = 1; + int err = -ENOSPC; int ret; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; + trans = btrfs_start_transaction(root, 10); + if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) + return trans; - di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, - name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + return ERR_PTR(-ENOSPC); - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); - BUG_ON(ret); - btrfs_release_path(root, path); + /* check if there is someone else holds reference */ + if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1) + return ERR_PTR(-ENOSPC); - ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, - objectid, root->root_key.objectid, - dir->i_ino, &index, name, name_len); - if (ret < 0) { - BUG_ON(ret != -ENOENT); - di = btrfs_search_dir_index_item(root, path, dir->i_ino, - name, name_len); - BUG_ON(!di || IS_ERR(di)); + if (atomic_read(&inode->i_count) > 2) + return ERR_PTR(-ENOSPC); - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - btrfs_release_path(root, path); - index = key.offset; + if (xchg(&root->fs_info->enospc_unlink, 1)) + return ERR_PTR(-ENOSPC); + + path = btrfs_alloc_path(); + if (!path) { + root->fs_info->enospc_unlink = 0; + return ERR_PTR(-ENOMEM); } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + root->fs_info->enospc_unlink = 0; + return trans; + } - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); - BUG_ON(ret); - btrfs_release_path(root, path); + path->skip_locking = 1; + path->search_commit_root = 1; - btrfs_i_size_write(dir, dir->i_size - name_len * 2); - dir->i_mtime = dir->i_ctime = CURRENT_TIME; + ret = btrfs_lookup_inode(trans, root, path, + &BTRFS_I(dir)->location, 0); + if (ret < 0) { + err = ret; + goto out; + } + if (ret == 0) { + if (check_path_shared(root, path)) + goto out; + } else { + check_link = 0; + } + btrfs_release_path(root, path); + + ret = btrfs_lookup_inode(trans, root, path, + &BTRFS_I(inode)->location, 0); + if (ret < 0) { + err = ret; + goto out; + } + if (ret == 0) { + if (check_path_shared(root, path)) + goto out; + } else { + check_link = 0; + } + btrfs_release_path(root, path); + + if (ret == 0 && S_ISREG(inode->i_mode)) { + ret = btrfs_lookup_file_extent(trans, root, path, + inode->i_ino, (u64)-1, 0); + if (ret < 0) { + err = ret; + goto out; + } + BUG_ON(ret == 0); + if (check_path_shared(root, path)) + goto out; + btrfs_release_path(root, path); + } + + if (!check_link) { + err = 0; + goto out; + } + + di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + dentry->d_name.name, dentry->d_name.len, 0); + if (IS_ERR(di)) { + err = PTR_ERR(di); + goto 
out; + } + if (di) { + if (check_path_shared(root, path)) + goto out; + } else { + err = 0; + goto out; + } + btrfs_release_path(root, path); + + ref = btrfs_lookup_inode_ref(trans, root, path, + dentry->d_name.name, dentry->d_name.len, + inode->i_ino, dir->i_ino, 0); + if (IS_ERR(ref)) { + err = PTR_ERR(ref); + goto out; + } + BUG_ON(!ref); + if (check_path_shared(root, path)) + goto out; + index = btrfs_inode_ref_index(path->nodes[0], ref); + btrfs_release_path(root, path); + + di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, + dentry->d_name.name, dentry->d_name.len, 0); + if (IS_ERR(di)) { + err = PTR_ERR(di); + goto out; + } + BUG_ON(ret == -ENOENT); + if (check_path_shared(root, path)) + goto out; + + err = 0; +out: + btrfs_free_path(path); + if (err) { + btrfs_end_transaction(trans, root); + root->fs_info->enospc_unlink = 0; + return ERR_PTR(err); + } + + trans->block_rsv = &root->fs_info->global_block_rsv; + return trans; +} + +static void __unlink_end_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + if (trans->block_rsv == &root->fs_info->global_block_rsv) { + BUG_ON(!root->fs_info->enospc_unlink); + root->fs_info->enospc_unlink = 0; + } + btrfs_end_transaction_throttle(trans, root); +} + +static int btrfs_unlink(struct inode *dir, struct dentry *dentry) +{ + struct btrfs_root *root = BTRFS_I(dir)->root; + struct btrfs_trans_handle *trans; + struct inode *inode = dentry->d_inode; + int ret; + unsigned long nr = 0; + + trans = __unlink_start_trans(dir, dentry); + if (IS_ERR(trans)) + return PTR_ERR(trans); + + btrfs_set_trans_block_group(trans, dir); + + btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); + + ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, + dentry->d_name.name, dentry->d_name.len); + BUG_ON(ret); + + if (inode->i_nlink == 0) { + ret = btrfs_orphan_add(trans, inode); + BUG_ON(ret); + } + + nr = trans->blocks_used; + __unlink_end_trans(trans, root); + btrfs_btree_balance_dirty(root, nr); + return ret; +} + +int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *dir, u64 objectid, + const char *name, int name_len) +{ + struct btrfs_path *path; + struct extent_buffer *leaf; + struct btrfs_dir_item *di; + struct btrfs_key key; + u64 index; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, + name, name_len, -1); + BUG_ON(!di || IS_ERR(di)); + + leaf = path->nodes[0]; + btrfs_dir_item_key_to_cpu(leaf, di, &key); + WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); + ret = btrfs_delete_one_dir_name(trans, root, path, di); + BUG_ON(ret); + btrfs_release_path(root, path); + + ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, + objectid, root->root_key.objectid, + dir->i_ino, &index, name, name_len); + if (ret < 0) { + BUG_ON(ret != -ENOENT); + di = btrfs_search_dir_index_item(root, path, dir->i_ino, + name, name_len); + BUG_ON(!di || IS_ERR(di)); + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + btrfs_release_path(root, path); + index = key.offset; + } + + di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, + index, name, name_len, -1); + BUG_ON(!di || IS_ERR(di)); + + leaf = path->nodes[0]; + btrfs_dir_item_key_to_cpu(leaf, di, &key); + WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); + ret = btrfs_delete_one_dir_name(trans, root, path, di); + BUG_ON(ret); + btrfs_release_path(root, path); + + 
btrfs_i_size_write(dir, dir->i_size - name_len * 2); + dir->i_mtime = dir->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); dir->i_sb->s_dirt = 1; @@ -2587,7 +2948,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; int err = 0; - int ret; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_trans_handle *trans; unsigned long nr = 0; @@ -2596,15 +2956,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) return -ENOTEMPTY; - ret = btrfs_reserve_metadata_space(root, 5); - if (ret) - return ret; - - trans = btrfs_start_transaction(root, 1); - if (IS_ERR(trans)) { - btrfs_unreserve_metadata_space(root, 5); + trans = __unlink_start_trans(dir, dentry); + if (IS_ERR(trans)) return PTR_ERR(trans); - } btrfs_set_trans_block_group(trans, dir); @@ -2627,12 +2981,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) btrfs_i_size_write(inode, 0); out: nr = trans->blocks_used; - ret = btrfs_end_transaction_throttle(trans, root); - btrfs_unreserve_metadata_space(root, 5); + __unlink_end_trans(trans, root); btrfs_btree_balance_dirty(root, nr); - if (ret && !err) - err = ret; return err; } @@ -3029,6 +3380,7 @@ out: if (pending_del_nr) { ret = btrfs_del_items(trans, root, path, pending_del_slot, pending_del_nr); + BUG_ON(ret); } btrfs_free_path(path); return err; @@ -3056,11 +3408,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) if ((offset & (blocksize - 1)) == 0) goto out; - ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); - if (ret) - goto out; - - ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); + ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) goto out; @@ -3068,8 +3416,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) again: page = grab_cache_page(mapping, index); if (!page) { - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); + btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); goto out; } @@ -3132,8 +3479,7 @@ again: out_unlock: if (ret) - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); + btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); unlock_page(page); page_cache_release(page); out: @@ -3145,7 +3491,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct extent_map *em; + struct extent_map *em = NULL; struct extent_state *cached_state = NULL; u64 mask = root->sectorsize - 1; u64 hole_start = (inode->i_size + mask) & ~mask; @@ -3183,11 +3529,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) u64 hint_byte = 0; hole_size = last_byte - cur_offset; - err = btrfs_reserve_metadata_space(root, 2); - if (err) + trans = btrfs_start_transaction(root, 2); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); break; - - trans = btrfs_start_transaction(root, 1); + } btrfs_set_trans_block_group(trans, inode); err = btrfs_drop_extents(trans, inode, cur_offset, @@ -3205,14 +3551,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) last_byte - 1, 0); btrfs_end_transaction(trans, root); - btrfs_unreserve_metadata_space(root, 2); } free_extent_map(em); + em = NULL; cur_offset = last_byte; if (cur_offset >= block_end) break; } + 
free_extent_map(em); unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, GFP_NOFS); return err; @@ -3239,11 +3586,10 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) } } - ret = btrfs_reserve_metadata_space(root, 1); - if (ret) - return ret; + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); ret = btrfs_orphan_add(trans, inode); @@ -3251,7 +3597,6 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) nr = trans->blocks_used; btrfs_end_transaction(trans, root); - btrfs_unreserve_metadata_space(root, 1); btrfs_btree_balance_dirty(root, nr); if (attr->ia_size > inode->i_size) { @@ -3264,8 +3609,11 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) i_size_write(inode, attr->ia_size); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = root->orphan_block_rsv; + BUG_ON(!trans->block_rsv); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -3345,10 +3693,21 @@ void btrfs_delete_inode(struct inode *inode) btrfs_i_size_write(inode, 0); while (1) { - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); - ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); + trans->block_rsv = root->orphan_block_rsv; + + ret = btrfs_block_rsv_check(trans, root, + root->orphan_block_rsv, 0, 5); + if (ret) { + BUG_ON(ret != -EAGAIN); + ret = btrfs_commit_transaction(trans, root); + BUG_ON(ret); + continue; + } + ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); if (ret != -EAGAIN) break; @@ -3356,6 +3715,7 @@ void btrfs_delete_inode(struct inode *inode) btrfs_end_transaction(trans, root); trans = NULL; btrfs_btree_balance_dirty(root, nr); + } if (ret == 0) { @@ -3596,40 +3956,10 @@ again: return 0; } -static noinline void init_btrfs_i(struct inode *inode) -{ - struct btrfs_inode *bi = BTRFS_I(inode); - - bi->generation = 0; - bi->sequence = 0; - bi->last_trans = 0; - bi->last_sub_trans = 0; - bi->logged_trans = 0; - bi->delalloc_bytes = 0; - bi->reserved_bytes = 0; - bi->disk_i_size = 0; - bi->flags = 0; - bi->index_cnt = (u64)-1; - bi->last_unlink_trans = 0; - bi->ordered_data_close = 0; - bi->force_compress = 0; - extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); - extent_io_tree_init(&BTRFS_I(inode)->io_tree, - inode->i_mapping, GFP_NOFS); - extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, - inode->i_mapping, GFP_NOFS); - INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes); - INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations); - RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node); - btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree); - mutex_init(&BTRFS_I(inode)->log_mutex); -} - static int btrfs_init_locked_inode(struct inode *inode, void *p) { struct btrfs_iget_args *args = p; inode->i_ino = args->ino; - init_btrfs_i(inode); BTRFS_I(inode)->root = args->root; btrfs_set_inode_space_info(args->root, inode); return 0; @@ -3692,8 +4022,6 @@ static struct inode *new_simple_dir(struct super_block *s, if (!inode) return ERR_PTR(-ENOMEM); - init_btrfs_i(inode); - BTRFS_I(inode)->root = root; memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); BTRFS_I(inode)->dummy_inode = 1; @@ -3950,7 
+4278,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) struct btrfs_trans_handle *trans; int ret = 0; - if (root->fs_info->btree_inode == inode) + if (BTRFS_I(inode)->dummy_inode) return 0; if (wbc->sync_mode == WB_SYNC_ALL) { @@ -3971,10 +4299,38 @@ void btrfs_dirty_inode(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; + int ret; + + if (BTRFS_I(inode)->dummy_inode) + return; trans = btrfs_join_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); - btrfs_update_inode(trans, root, inode); + + ret = btrfs_update_inode(trans, root, inode); + if (ret && ret == -ENOSPC) { + /* whoops, lets try again with the full transaction */ + btrfs_end_transaction(trans, root); + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + if (printk_ratelimit()) { + printk(KERN_ERR "btrfs: fail to " + "dirty inode %lu error %ld\n", + inode->i_ino, PTR_ERR(trans)); + } + return; + } + btrfs_set_trans_block_group(trans, inode); + + ret = btrfs_update_inode(trans, root, inode); + if (ret) { + if (printk_ratelimit()) { + printk(KERN_ERR "btrfs: fail to " + "dirty inode %lu error %d\n", + inode->i_ino, ret); + } + } + } btrfs_end_transaction(trans, root); } @@ -4092,7 +4448,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, * btrfs_get_inode_index_count has an explanation for the magic * number */ - init_btrfs_i(inode); BTRFS_I(inode)->index_cnt = 2; BTRFS_I(inode)->root = root; BTRFS_I(inode)->generation = trans->transid; @@ -4247,26 +4602,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (!new_valid_dev(rdev)) return -EINVAL; + err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); + if (err) + return err; + /* * 2 for inode item and ref * 2 for dir items * 1 for xattr if selinux is on */ - err = btrfs_reserve_metadata_space(root, 5); - if (err) - return err; + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); - if (!trans) - goto fail; btrfs_set_trans_block_group(trans, dir); - err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); - if (err) { - err = -ENOSPC; - goto out_unlock; - } - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dentry->d_parent->d_inode->i_ino, objectid, @@ -4295,13 +4645,11 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); -fail: - btrfs_unreserve_metadata_space(root, 5); + btrfs_btree_balance_dirty(root, nr); if (drop_inode) { inode_dec_link_count(inode); iput(inode); } - btrfs_btree_balance_dirty(root, nr); return err; } @@ -4311,32 +4659,26 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(dir)->root; struct inode *inode = NULL; - int err; int drop_inode = 0; + int err; unsigned long nr = 0; u64 objectid; u64 index = 0; + err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); + if (err) + return err; /* * 2 for inode item and ref * 2 for dir items * 1 for xattr if selinux is on */ - err = btrfs_reserve_metadata_space(root, 5); - if (err) - return err; + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); - if (!trans) - goto fail; btrfs_set_trans_block_group(trans, dir); - err = btrfs_find_free_objectid(trans, root, 
dir->i_ino, &objectid); - if (err) { - err = -ENOSPC; - goto out_unlock; - } - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dentry->d_parent->d_inode->i_ino, @@ -4368,8 +4710,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); -fail: - btrfs_unreserve_metadata_space(root, 5); if (drop_inode) { inode_dec_link_count(inode); iput(inode); @@ -4396,21 +4736,21 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (root->objectid != BTRFS_I(inode)->root->objectid) return -EPERM; - /* - * 1 item for inode ref - * 2 items for dir items - */ - err = btrfs_reserve_metadata_space(root, 3); - if (err) - return err; - btrfs_inc_nlink(inode); err = btrfs_set_inode_index(dir, &index); if (err) goto fail; - trans = btrfs_start_transaction(root, 1); + /* + * 1 item for inode ref + * 2 items for dir items + */ + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto fail; + } btrfs_set_trans_block_group(trans, dir); atomic_inc(&inode->i_count); @@ -4429,7 +4769,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); fail: - btrfs_unreserve_metadata_space(root, 3); if (drop_inode) { inode_dec_link_count(inode); iput(inode); @@ -4449,28 +4788,20 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) u64 index = 0; unsigned long nr = 1; + err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); + if (err) + return err; + /* * 2 items for inode and ref * 2 items for dir items * 1 for xattr if selinux is on */ - err = btrfs_reserve_metadata_space(root, 5); - if (err) - return err; - - trans = btrfs_start_transaction(root, 1); - if (!trans) { - err = -ENOMEM; - goto out_unlock; - } + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, dir); - err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); - if (err) { - err = -ENOSPC; - goto out_fail; - } - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dentry->d_parent->d_inode->i_ino, objectid, @@ -4510,9 +4841,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) out_fail: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); - -out_unlock: - btrfs_unreserve_metadata_space(root, 5); if (drop_on_err) iput(inode); btrfs_btree_balance_dirty(root, nr); @@ -4770,6 +5098,7 @@ again: } flush_dcache_page(page); } else if (create && PageUptodate(page)) { + WARN_ON(1); if (!trans) { kunmap(page); free_extent_map(em); @@ -4866,11 +5195,651 @@ out: return em; } +static struct extent_map *btrfs_new_extent_direct(struct inode *inode, + u64 start, u64 len) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct btrfs_key ins; + u64 alloc_hint; + int ret; + + btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + + trans = btrfs_join_transaction(root, 0); + if (!trans) + return ERR_PTR(-ENOMEM); + + trans->block_rsv = &root->fs_info->delalloc_block_rsv; + + alloc_hint = get_extent_allocation_hint(inode, start, len); + ret = btrfs_reserve_extent(trans, root, len, root->sectorsize, 0, + alloc_hint, (u64)-1, &ins, 1); + if (ret) { + em = ERR_PTR(ret); + goto out; + } + + em = 
alloc_extent_map(GFP_NOFS); + if (!em) { + em = ERR_PTR(-ENOMEM); + goto out; + } + + em->start = start; + em->orig_start = em->start; + em->len = ins.offset; + + em->block_start = ins.objectid; + em->block_len = ins.offset; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + + while (1) { + write_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + write_unlock(&em_tree->lock); + if (ret != -EEXIST) + break; + btrfs_drop_extent_cache(inode, start, start + em->len - 1, 0); + } + + ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, + ins.offset, ins.offset, 0); + if (ret) { + btrfs_free_reserved_extent(root, ins.objectid, ins.offset); + em = ERR_PTR(ret); + } +out: + btrfs_end_transaction(trans, root); + return em; +} + +/* + * returns 1 when the nocow is safe, < 1 on error, 0 if the + * block must be cow'd + */ +static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, + struct inode *inode, u64 offset, u64 len) +{ + struct btrfs_path *path; + int ret; + struct extent_buffer *leaf; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 disk_bytenr; + u64 backref_offset; + u64 extent_end; + u64 num_bytes; + int slot; + int found_type; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, + offset, 0); + if (ret < 0) + goto out; + + slot = path->slots[0]; + if (ret == 1) { + if (slot == 0) { + /* can't find the item, must cow */ + ret = 0; + goto out; + } + slot--; + } + ret = 0; + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid != inode->i_ino || + key.type != BTRFS_EXTENT_DATA_KEY) { + /* not our file or wrong item type, must cow */ + goto out; + } + + if (key.offset > offset) { + /* Wrong offset, must cow */ + goto out; + } + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + found_type = btrfs_file_extent_type(leaf, fi); + if (found_type != BTRFS_FILE_EXTENT_REG && + found_type != BTRFS_FILE_EXTENT_PREALLOC) { + /* not a regular extent, must cow */ + goto out; + } + disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); + backref_offset = btrfs_file_extent_offset(leaf, fi); + + extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + if (extent_end < offset + len) { + /* extent doesn't include our full range, must cow */ + goto out; + } + + if (btrfs_extent_readonly(root, disk_bytenr)) + goto out; + + /* + * look for other files referencing this extent, if we + * find any we must cow + */ + if (btrfs_cross_ref_exist(trans, root, inode->i_ino, + key.offset - backref_offset, disk_bytenr)) + goto out; + + /* + * adjust disk_bytenr and num_bytes to cover just the bytes + * in this extent we are about to write. 
If there + * are any csums in that range we have to cow in order + * to keep the csums correct + */ + disk_bytenr += backref_offset; + disk_bytenr += offset - key.offset; + num_bytes = min(offset + len, extent_end) - offset; + if (csum_exist_in_range(root, disk_bytenr, num_bytes)) + goto out; + /* + * all of the above have passed, it is safe to overwrite this extent + * without cow + */ + ret = 1; +out: + btrfs_free_path(path); + return ret; +} + +static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) +{ + struct extent_map *em; + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 start = iblock << inode->i_blkbits; + u64 len = bh_result->b_size; + struct btrfs_trans_handle *trans; + + em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + if (IS_ERR(em)) + return PTR_ERR(em); + + /* + * Ok for INLINE and COMPRESSED extents we need to fallback on buffered + * io. INLINE is special, and we could probably kludge it in here, but + * it's still buffered so for safety lets just fall back to the generic + * buffered path. + * + * For COMPRESSED we _have_ to read the entire extent in so we can + * decompress it, so there will be buffering required no matter what we + * do, so go ahead and fallback to buffered. + * + * We return -ENOTBLK because thats what makes DIO go ahead and go back + * to buffered IO. Don't blame me, this is the price we pay for using + * the generic code. + */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || + em->block_start == EXTENT_MAP_INLINE) { + free_extent_map(em); + return -ENOTBLK; + } + + /* Just a good old fashioned hole, return */ + if (!create && (em->block_start == EXTENT_MAP_HOLE || + test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { + free_extent_map(em); + /* DIO will do one hole at a time, so just unlock a sector */ + unlock_extent(&BTRFS_I(inode)->io_tree, start, + start + root->sectorsize - 1, GFP_NOFS); + return 0; + } + + /* + * We don't allocate a new extent in the following cases + * + * 1) The inode is marked as NODATACOW. In this case we'll just use the + * existing extent. + * 2) The extent is marked as PREALLOC. We're good to go here and can + * just use the extent. 
+ * + */ + if (!create) { + len = em->len - (start - em->start); + goto map; + } + + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || + ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && + em->block_start != EXTENT_MAP_HOLE)) { + int type; + int ret; + u64 block_start; + + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) + type = BTRFS_ORDERED_PREALLOC; + else + type = BTRFS_ORDERED_NOCOW; + len = min(len, em->len - (start - em->start)); + block_start = em->block_start + (start - em->start); + + /* + * we're not going to log anything, but we do need + * to make sure the current transaction stays open + * while we look for nocow cross refs + */ + trans = btrfs_join_transaction(root, 0); + if (!trans) + goto must_cow; + + if (can_nocow_odirect(trans, inode, start, len) == 1) { + ret = btrfs_add_ordered_extent_dio(inode, start, + block_start, len, len, type); + btrfs_end_transaction(trans, root); + if (ret) { + free_extent_map(em); + return ret; + } + goto unlock; + } + btrfs_end_transaction(trans, root); + } +must_cow: + /* + * this will cow the extent, reset the len in case we changed + * it above + */ + len = bh_result->b_size; + free_extent_map(em); + em = btrfs_new_extent_direct(inode, start, len); + if (IS_ERR(em)) + return PTR_ERR(em); + len = min(len, em->len - (start - em->start)); +unlock: + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, + EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, + 0, NULL, GFP_NOFS); +map: + bh_result->b_blocknr = (em->block_start + (start - em->start)) >> + inode->i_blkbits; + bh_result->b_size = len; + bh_result->b_bdev = em->bdev; + set_buffer_mapped(bh_result); + if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) + set_buffer_new(bh_result); + + free_extent_map(em); + + return 0; +} + +struct btrfs_dio_private { + struct inode *inode; + u64 logical_offset; + u64 disk_bytenr; + u64 bytes; + u32 *csums; + void *private; +}; + +static void btrfs_endio_direct_read(struct bio *bio, int err) +{ + struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; + struct bio_vec *bvec = bio->bi_io_vec; + struct btrfs_dio_private *dip = bio->bi_private; + struct inode *inode = dip->inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 start; + u32 *private = dip->csums; + + start = dip->logical_offset; + do { + if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { + struct page *page = bvec->bv_page; + char *kaddr; + u32 csum = ~(u32)0; + unsigned long flags; + + local_irq_save(flags); + kaddr = kmap_atomic(page, KM_IRQ0); + csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, + csum, bvec->bv_len); + btrfs_csum_final(csum, (char *)&csum); + kunmap_atomic(kaddr, KM_IRQ0); + local_irq_restore(flags); + + flush_dcache_page(bvec->bv_page); + if (csum != *private) { + printk(KERN_ERR "btrfs csum failed ino %lu off" + " %llu csum %u private %u\n", + inode->i_ino, (unsigned long long)start, + csum, *private); + err = -EIO; + } + } + + start += bvec->bv_len; + private++; + bvec++; + } while (bvec <= bvec_end); + + unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, + dip->logical_offset + dip->bytes - 1, GFP_NOFS); + bio->bi_private = dip->private; + + kfree(dip->csums); + kfree(dip); + dio_end_io(bio, err); +} + +static void btrfs_endio_direct_write(struct bio *bio, int err) +{ + struct btrfs_dio_private *dip = bio->bi_private; + struct inode *inode = dip->inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + struct btrfs_ordered_extent *ordered = NULL; + struct extent_state 
*cached_state = NULL; + int ret; + + if (err) + goto out_done; + + ret = btrfs_dec_test_ordered_pending(inode, &ordered, + dip->logical_offset, dip->bytes); + if (!ret) + goto out_done; + + BUG_ON(!ordered); + + trans = btrfs_join_transaction(root, 1); + if (!trans) { + err = -ENOMEM; + goto out; + } + trans->block_rsv = &root->fs_info->delalloc_block_rsv; + + if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + ret = btrfs_ordered_update_i_size(inode, 0, ordered); + if (!ret) + ret = btrfs_update_inode(trans, root, inode); + err = ret; + goto out; + } + + lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset, + ordered->file_offset + ordered->len - 1, 0, + &cached_state, GFP_NOFS); + + if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { + ret = btrfs_mark_extent_written(trans, inode, + ordered->file_offset, + ordered->file_offset + + ordered->len); + if (ret) { + err = ret; + goto out_unlock; + } + } else { + ret = insert_reserved_file_extent(trans, inode, + ordered->file_offset, + ordered->start, + ordered->disk_len, + ordered->len, + ordered->len, + 0, 0, 0, + BTRFS_FILE_EXTENT_REG); + unpin_extent_cache(&BTRFS_I(inode)->extent_tree, + ordered->file_offset, ordered->len); + if (ret) { + err = ret; + WARN_ON(1); + goto out_unlock; + } + } + + add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); + btrfs_ordered_update_i_size(inode, 0, ordered); + btrfs_update_inode(trans, root, inode); +out_unlock: + unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, + ordered->file_offset + ordered->len - 1, + &cached_state, GFP_NOFS); +out: + btrfs_delalloc_release_metadata(inode, ordered->len); + btrfs_end_transaction(trans, root); + btrfs_put_ordered_extent(ordered); + btrfs_put_ordered_extent(ordered); +out_done: + bio->bi_private = dip->private; + + kfree(dip->csums); + kfree(dip); + dio_end_io(bio, err); +} + +static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags, u64 offset) +{ + int ret; + struct btrfs_root *root = BTRFS_I(inode)->root; + ret = btrfs_csum_one_bio(root, inode, bio, offset, 1); + BUG_ON(ret); + return 0; +} + +static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, + loff_t file_offset) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_dio_private *dip; + struct bio_vec *bvec = bio->bi_io_vec; + u64 start; + int skip_sum; + int write = rw & (1 << BIO_RW); + int ret = 0; + + skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; + + dip = kmalloc(sizeof(*dip), GFP_NOFS); + if (!dip) { + ret = -ENOMEM; + goto free_ordered; + } + dip->csums = NULL; + + if (!skip_sum) { + dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); + if (!dip->csums) { + ret = -ENOMEM; + goto free_ordered; + } + } + + dip->private = bio->bi_private; + dip->inode = inode; + dip->logical_offset = file_offset; + + start = dip->logical_offset; + dip->bytes = 0; + do { + dip->bytes += bvec->bv_len; + bvec++; + } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); + + dip->disk_bytenr = (u64)bio->bi_sector << 9; + bio->bi_private = dip; + + if (write) + bio->bi_end_io = btrfs_endio_direct_write; + else + bio->bi_end_io = btrfs_endio_direct_read; + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + if (ret) + goto out_err; + + if (write && !skip_sum) { + ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, + inode, rw, bio, 0, 0, + dip->logical_offset, + __btrfs_submit_bio_start_direct_io, + __btrfs_submit_bio_done); + if 
(ret) + goto out_err; + return; + } else if (!skip_sum) + btrfs_lookup_bio_sums_dio(root, inode, bio, + dip->logical_offset, dip->csums); + + ret = btrfs_map_bio(root, rw, bio, 0, 1); + if (ret) + goto out_err; + return; +out_err: + kfree(dip->csums); + kfree(dip); +free_ordered: + /* + * If this is a write, we need to clean up the reserved space and kill + * the ordered extent. + */ + if (write) { + struct btrfs_ordered_extent *ordered; + ordered = btrfs_lookup_ordered_extent(inode, + dip->logical_offset); + if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && + !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) + btrfs_free_reserved_extent(root, ordered->start, + ordered->disk_len); + btrfs_put_ordered_extent(ordered); + btrfs_put_ordered_extent(ordered); + } + bio_endio(bio, ret); +} + +static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, + const struct iovec *iov, loff_t offset, + unsigned long nr_segs) +{ + int seg; + size_t size; + unsigned long addr; + unsigned blocksize_mask = root->sectorsize - 1; + ssize_t retval = -EINVAL; + loff_t end = offset; + + if (offset & blocksize_mask) + goto out; + + /* Check the memory alignment. Blocks cannot straddle pages */ + for (seg = 0; seg < nr_segs; seg++) { + addr = (unsigned long)iov[seg].iov_base; + size = iov[seg].iov_len; + end += size; + if ((addr & blocksize_mask) || (size & blocksize_mask)) + goto out; + } + retval = 0; +out: + return retval; +} static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { - return -EINVAL; + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; + u64 lockstart, lockend; + ssize_t ret; + int writing = rw & WRITE; + int write_bits = 0; + size_t count = iov_length(iov, nr_segs); + + if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, + offset, nr_segs)) { + return 0; + } + + lockstart = offset; + lockend = offset + count - 1; + + if (writing) { + ret = btrfs_delalloc_reserve_space(inode, count); + if (ret) + goto out; + } + + while (1) { + lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, + 0, &cached_state, GFP_NOFS); + /* + * We're concerned with the entire range that we're going to be + * doing DIO to, so we need to make sure theres no ordered + * extents in this range. 
+ */ + ordered = btrfs_lookup_ordered_range(inode, lockstart, + lockend - lockstart + 1); + if (!ordered) + break; + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state, GFP_NOFS); + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + cond_resched(); + } + + /* + * we don't use btrfs_set_extent_delalloc because we don't want + * the dirty or uptodate bits + */ + if (writing) { + write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; + ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, + EXTENT_DELALLOC, 0, NULL, &cached_state, + GFP_NOFS); + if (ret) { + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, EXTENT_LOCKED | write_bits, + 1, 0, &cached_state, GFP_NOFS); + goto out; + } + } + + free_extent_state(cached_state); + cached_state = NULL; + + ret = __blockdev_direct_IO(rw, iocb, inode, + BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, + iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, + btrfs_submit_direct, 0); + + if (ret < 0 && ret != -EIOCBQUEUED) { + clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { + /* + * We're falling back to buffered, unlock the section we didn't + * do IO on. + */ + clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, + offset + iov_length(iov, nr_segs) - 1, + EXTENT_LOCKED | write_bits, 1, 0, + &cached_state, GFP_NOFS); + } +out: + free_extent_state(cached_state); + return ret; } static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -5034,7 +6003,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) u64 page_start; u64 page_end; - ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); + ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) { if (ret == -ENOMEM) ret = VM_FAULT_OOM; @@ -5043,13 +6012,6 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) goto out; } - ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); - if (ret) { - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); - ret = VM_FAULT_SIGBUS; - goto out; - } - ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ again: lock_page(page); @@ -5059,7 +6021,6 @@ again: if ((page->mapping != inode->i_mapping) || (page_start >= size)) { - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); /* page got truncated out from underneath us */ goto out_unlock; } @@ -5100,7 +6061,6 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); ret = VM_FAULT_SIGBUS; - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); goto out_unlock; } ret = 0; @@ -5127,10 +6087,10 @@ again: unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); out_unlock: - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); if (!ret) return VM_FAULT_LOCKED; unlock_page(page); + btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); out: return ret; } @@ -5155,8 +6115,10 @@ static void btrfs_truncate(struct inode *inode) btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = root->orphan_block_rsv; /* * setattr is responsible for setting 
the ordered_data_close flag, @@ -5179,6 +6141,23 @@ static void btrfs_truncate(struct inode *inode) btrfs_add_ordered_operation(trans, root, inode); while (1) { + if (!trans) { + trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = root->orphan_block_rsv; + } + + ret = btrfs_block_rsv_check(trans, root, + root->orphan_block_rsv, 0, 5); + if (ret) { + BUG_ON(ret != -EAGAIN); + ret = btrfs_commit_transaction(trans, root); + BUG_ON(ret); + trans = NULL; + continue; + } + ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, BTRFS_EXTENT_DATA_KEY); @@ -5190,10 +6169,8 @@ static void btrfs_truncate(struct inode *inode) nr = trans->blocks_used; btrfs_end_transaction(trans, root); + trans = NULL; btrfs_btree_balance_dirty(root, nr); - - trans = btrfs_start_transaction(root, 1); - btrfs_set_trans_block_group(trans, inode); } if (ret == 0 && inode->i_nlink > 0) { @@ -5254,21 +6231,47 @@ unsigned long btrfs_force_ra(struct address_space *mapping, struct inode *btrfs_alloc_inode(struct super_block *sb) { struct btrfs_inode *ei; + struct inode *inode; ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; + + ei->root = NULL; + ei->space_info = NULL; + ei->generation = 0; + ei->sequence = 0; ei->last_trans = 0; ei->last_sub_trans = 0; ei->logged_trans = 0; - ei->outstanding_extents = 0; - ei->reserved_extents = 0; - ei->root = NULL; + ei->delalloc_bytes = 0; + ei->reserved_bytes = 0; + ei->disk_i_size = 0; + ei->flags = 0; + ei->index_cnt = (u64)-1; + ei->last_unlink_trans = 0; + spin_lock_init(&ei->accounting_lock); + atomic_set(&ei->outstanding_extents, 0); + ei->reserved_extents = 0; + + ei->ordered_data_close = 0; + ei->orphan_meta_reserved = 0; + ei->dummy_inode = 0; + ei->force_compress = 0; + + inode = &ei->vfs_inode; + extent_map_tree_init(&ei->extent_tree, GFP_NOFS); + extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); + extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); + mutex_init(&ei->log_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->i_orphan); + INIT_LIST_HEAD(&ei->delalloc_inodes); INIT_LIST_HEAD(&ei->ordered_operations); - return &ei->vfs_inode; + RB_CLEAR_NODE(&ei->rb_node); + + return inode; } void btrfs_destroy_inode(struct inode *inode) @@ -5278,6 +6281,8 @@ void btrfs_destroy_inode(struct inode *inode) WARN_ON(!list_empty(&inode->i_dentry)); WARN_ON(inode->i_data.nrpages); + WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents)); + WARN_ON(BTRFS_I(inode)->reserved_extents); /* * This can happen where we create an inode, but somebody else also @@ -5298,13 +6303,13 @@ void btrfs_destroy_inode(struct inode *inode) spin_unlock(&root->fs_info->ordered_extent_lock); } - spin_lock(&root->list_lock); + spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", inode->i_ino); list_del_init(&BTRFS_I(inode)->i_orphan); } - spin_unlock(&root->list_lock); + spin_unlock(&root->orphan_lock); while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); @@ -5425,19 +6430,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (S_ISDIR(old_inode->i_mode) && new_inode && new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; - - /* - * We want to reserve the absolute worst case amount of items. 
So if - * both inodes are subvols and we need to unlink them then that would - * require 4 item modifications, but if they are both normal inodes it - * would require 5 item modifications, so we'll assume their normal - * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items - * should cover the worst case number of items we'll modify. - */ - ret = btrfs_reserve_metadata_space(root, 11); - if (ret) - return ret; - /* * we're using rename to replace one file with another. * and the replacement file is large. Start IO on it now so @@ -5450,8 +6442,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, /* close the racy window with snapshot create/destroy ioctl */ if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) down_read(&root->fs_info->subvol_sem); + /* + * We want to reserve the absolute worst case amount of items. So if + * both inodes are subvols and we need to unlink them then that would + * require 4 item modifications, but if they are both normal inodes it + * would require 5 item modifications, so we'll assume their normal + * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items + * should cover the worst case number of items we'll modify. + */ + trans = btrfs_start_transaction(root, 20); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, new_dir); if (dest != root) @@ -5550,7 +6552,6 @@ out_fail: if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) up_read(&root->fs_info->subvol_sem); - btrfs_unreserve_metadata_space(root, 11); return ret; } @@ -5602,6 +6603,38 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) return 0; } +int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput) +{ + struct btrfs_inode *binode; + struct inode *inode = NULL; + + spin_lock(&root->fs_info->delalloc_lock); + while (!list_empty(&root->fs_info->delalloc_inodes)) { + binode = list_entry(root->fs_info->delalloc_inodes.next, + struct btrfs_inode, delalloc_inodes); + inode = igrab(&binode->vfs_inode); + if (inode) { + list_move_tail(&binode->delalloc_inodes, + &root->fs_info->delalloc_inodes); + break; + } + + list_del_init(&binode->delalloc_inodes); + cond_resched_lock(&root->fs_info->delalloc_lock); + } + spin_unlock(&root->fs_info->delalloc_lock); + + if (inode) { + write_inode_now(inode, 0); + if (delay_iput) + btrfs_add_delayed_iput(inode); + else + iput(inode); + return 1; + } + return 0; +} + static int btrfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { @@ -5625,26 +6658,20 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) return -ENAMETOOLONG; + err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); + if (err) + return err; /* * 2 items for inode item and ref * 2 items for dir items * 1 item for xattr if selinux is on */ - err = btrfs_reserve_metadata_space(root, 5); - if (err) - return err; + trans = btrfs_start_transaction(root, 5); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); - if (!trans) - goto out_fail; btrfs_set_trans_block_group(trans, dir); - err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); - if (err) { - err = -ENOSPC; - goto out_unlock; - } - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dentry->d_parent->d_inode->i_ino, objectid, @@ -5716,8 +6743,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry 
*dentry, out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); -out_fail: - btrfs_unreserve_metadata_space(root, 5); if (drop_inode) { inode_dec_link_count(inode); iput(inode); @@ -5726,33 +6751,28 @@ out_fail: return err; } -static int prealloc_file_range(struct inode *inode, u64 start, u64 end, - u64 alloc_hint, int mode, loff_t actual_len) +int btrfs_prealloc_file_range(struct inode *inode, int mode, + u64 start, u64 num_bytes, u64 min_size, + loff_t actual_len, u64 *alloc_hint) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key ins; u64 cur_offset = start; - u64 num_bytes = end - start; int ret = 0; - u64 i_size; while (num_bytes > 0) { - trans = btrfs_start_transaction(root, 1); - - ret = btrfs_reserve_extent(trans, root, num_bytes, - root->sectorsize, 0, alloc_hint, - (u64)-1, &ins, 1); - if (ret) { - WARN_ON(1); - goto stop_trans; + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + break; } - ret = btrfs_reserve_metadata_space(root, 3); + ret = btrfs_reserve_extent(trans, root, num_bytes, min_size, + 0, *alloc_hint, (u64)-1, &ins, 1); if (ret) { - btrfs_free_reserved_extent(root, ins.objectid, - ins.offset); - goto stop_trans; + btrfs_end_transaction(trans, root); + break; } ret = insert_reserved_file_extent(trans, inode, @@ -5766,34 +6786,27 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end, num_bytes -= ins.offset; cur_offset += ins.offset; - alloc_hint = ins.objectid + ins.offset; + *alloc_hint = ins.objectid + ins.offset; inode->i_ctime = CURRENT_TIME; BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; if (!(mode & FALLOC_FL_KEEP_SIZE) && - (actual_len > inode->i_size) && - (cur_offset > inode->i_size)) { - + (actual_len > inode->i_size) && + (cur_offset > inode->i_size)) { if (cur_offset > actual_len) - i_size = actual_len; + i_size_write(inode, actual_len); else - i_size = cur_offset; - i_size_write(inode, i_size); - btrfs_ordered_update_i_size(inode, i_size, NULL); + i_size_write(inode, cur_offset); + i_size_write(inode, cur_offset); + btrfs_ordered_update_i_size(inode, cur_offset, NULL); } ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); btrfs_end_transaction(trans, root); - btrfs_unreserve_metadata_space(root, 3); } return ret; - -stop_trans: - btrfs_end_transaction(trans, root); - return ret; - } static long btrfs_fallocate(struct inode *inode, int mode, @@ -5826,8 +6839,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, goto out; } - ret = btrfs_check_data_free_space(BTRFS_I(inode)->root, inode, - alloc_end - alloc_start); + ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); if (ret) goto out; @@ -5872,16 +6884,16 @@ static long btrfs_fallocate(struct inode *inode, int mode, if (em->block_start == EXTENT_MAP_HOLE || (cur_offset >= inode->i_size && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { - ret = prealloc_file_range(inode, - cur_offset, last_byte, - alloc_hint, mode, offset+len); + ret = btrfs_prealloc_file_range(inode, 0, cur_offset, + last_byte - cur_offset, + 1 << inode->i_blkbits, + offset + len, + &alloc_hint); if (ret < 0) { free_extent_map(em); break; } } - if (em->block_start <= EXTENT_MAP_LAST_BYTE) - alloc_hint = em->block_start; free_extent_map(em); cur_offset = last_byte; @@ -5893,8 +6905,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, &cached_state, GFP_NOFS); - 
btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, - alloc_end - alloc_start); + btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); out: mutex_unlock(&inode->i_mutex); return ret; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 97a97839a867..4cdb98cf26de 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -239,23 +239,19 @@ static noinline int create_subvol(struct btrfs_root *root, u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; u64 index = 0; + ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, + 0, &objectid); + if (ret) + return ret; /* * 1 - inode item * 2 - refs * 1 - root item * 2 - dir items */ - ret = btrfs_reserve_metadata_space(root, 6); - if (ret) - return ret; - - trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); - - ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root, - 0, &objectid); - if (ret) - goto fail; + trans = btrfs_start_transaction(root, 6); + if (IS_ERR(trans)) + return PTR_ERR(trans); leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, objectid, NULL, 0, 0, 0); @@ -345,13 +341,10 @@ fail: err = btrfs_commit_transaction(trans, root); if (err && !ret) ret = err; - - btrfs_unreserve_metadata_space(root, 6); return ret; } -static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, - char *name, int namelen) +static int create_snapshot(struct btrfs_root *root, struct dentry *dentry) { struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; @@ -361,40 +354,33 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, if (!root->ref_cows) return -EINVAL; - /* - * 1 - inode item - * 2 - refs - * 1 - root item - * 2 - dir items - */ - ret = btrfs_reserve_metadata_space(root, 6); - if (ret) - goto fail; - pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS); - if (!pending_snapshot) { - ret = -ENOMEM; - btrfs_unreserve_metadata_space(root, 6); - goto fail; - } - pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS); - if (!pending_snapshot->name) { - ret = -ENOMEM; - kfree(pending_snapshot); - btrfs_unreserve_metadata_space(root, 6); - goto fail; - } - memcpy(pending_snapshot->name, name, namelen); - pending_snapshot->name[namelen] = '\0'; + if (!pending_snapshot) + return -ENOMEM; + + btrfs_init_block_rsv(&pending_snapshot->block_rsv); pending_snapshot->dentry = dentry; - trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); pending_snapshot->root = root; + + trans = btrfs_start_transaction(root->fs_info->extent_root, 5); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto fail; + } + + ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); + BUG_ON(ret); + list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); - ret = btrfs_commit_transaction(trans, root); + ret = btrfs_commit_transaction(trans, root->fs_info->extent_root); BUG_ON(ret); - btrfs_unreserve_metadata_space(root, 6); + + ret = pending_snapshot->error; + if (ret) + goto fail; + + btrfs_orphan_cleanup(pending_snapshot->snap); inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); if (IS_ERR(inode)) { @@ -405,6 +391,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, d_instantiate(dentry, inode); ret = 0; fail: + kfree(pending_snapshot); return ret; } @@ -456,8 +443,7 @@ static noinline int btrfs_mksubvol(struct path *parent, goto out_up_read; if (snap_src) { - error = create_snapshot(snap_src, dentry, - name, namelen); + error = create_snapshot(snap_src, dentry); } else { error = create_subvol(BTRFS_I(dir)->root, 
dentry, name, namelen); @@ -601,19 +587,9 @@ static int btrfs_defrag_file(struct file *file, if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) BTRFS_I(inode)->force_compress = 1; - ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); - if (ret) { - ret = -ENOSPC; - break; - } - - ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); - if (ret) { - btrfs_free_reserved_data_space(root, inode, - PAGE_CACHE_SIZE); - ret = -ENOSPC; - break; - } + ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); + if (ret) + goto err_unlock; again: if (inode->i_size == 0 || i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { @@ -622,8 +598,10 @@ again: } page = grab_cache_page(inode->i_mapping, i); - if (!page) + if (!page) { + ret = -ENOMEM; goto err_reservations; + } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); @@ -631,6 +609,7 @@ again: if (!PageUptodate(page)) { unlock_page(page); page_cache_release(page); + ret = -EIO; goto err_reservations; } } @@ -644,8 +623,7 @@ again: wait_on_page_writeback(page); if (PageDirty(page)) { - btrfs_free_reserved_data_space(root, inode, - PAGE_CACHE_SIZE); + btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); goto loop_unlock; } @@ -683,7 +661,6 @@ loop_unlock: page_cache_release(page); mutex_unlock(&inode->i_mutex); - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); i++; } @@ -713,9 +690,9 @@ loop_unlock: return 0; err_reservations: + btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); +err_unlock: mutex_unlock(&inode->i_mutex); - btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); - btrfs_unreserve_metadata_for_delalloc(root, inode, 1); return ret; } @@ -811,7 +788,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, device->name, (unsigned long long)new_size); if (new_size > old_size) { - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -1300,7 +1277,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, if (err) goto out_up_write; - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } + trans->block_rsv = &root->fs_info->global_block_rsv; + ret = btrfs_unlink_subvol(trans, root, dir, dest->root_key.objectid, dentry->d_name.name, @@ -1314,10 +1297,12 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, dest->root_item.drop_level = 0; btrfs_set_root_refs(&dest->root_item, 0); - ret = btrfs_insert_orphan_item(trans, - root->fs_info->tree_root, - dest->root_key.objectid); - BUG_ON(ret); + if (!xchg(&dest->orphan_item_inserted, 1)) { + ret = btrfs_insert_orphan_item(trans, + root->fs_info->tree_root, + dest->root_key.objectid); + BUG_ON(ret); + } ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -1358,8 +1343,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) ret = -EPERM; goto out; } - btrfs_defrag_root(root, 0); - btrfs_defrag_root(root->fs_info->extent_root, 0); + ret = btrfs_defrag_root(root, 0); + if (ret) + goto out; + ret = btrfs_defrag_root(root->fs_info->extent_root, 0); break; case S_IFREG: if (!(file->f_mode & FMODE_WRITE)) { @@ -1389,9 +1376,11 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) /* the rest are all set to zero by kzalloc */ range->len = (u64)-1; } - btrfs_defrag_file(file, range); + ret = btrfs_defrag_file(file, 
range); kfree(range); break; + default: + ret = -EINVAL; } out: mnt_drop_write(file->f_path.mnt); @@ -1550,12 +1539,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_wait_ordered_range(src, off, off+len); } - trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); - - /* punch hole in destination first */ - btrfs_drop_extents(trans, inode, off, off + len, &hint_byte, 1); - /* clone data */ key.objectid = src->i_ino; key.type = BTRFS_EXTENT_DATA_KEY; @@ -1566,7 +1549,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, * note the key will change type as we walk through the * tree. */ - ret = btrfs_search_slot(trans, root, &key, path, 0, 0); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; @@ -1629,12 +1612,31 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, new_key.objectid = inode->i_ino; new_key.offset = key.offset + destoff - off; + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out; + } + if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { + if (off > key.offset) { + datao += off - key.offset; + datal -= off - key.offset; + } + + if (key.offset + datal > off + len) + datal = off + len - key.offset; + + ret = btrfs_drop_extents(trans, inode, + new_key.offset, + new_key.offset + datal, + &hint_byte, 1); + BUG_ON(ret); + ret = btrfs_insert_empty_item(trans, root, path, &new_key, size); - if (ret) - goto out; + BUG_ON(ret); leaf = path->nodes[0]; slot = path->slots[0]; @@ -1645,14 +1647,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - if (off > key.offset) { - datao += off - key.offset; - datal -= off - key.offset; - } - - if (key.offset + datal > off + len) - datal = off + len - key.offset; - /* disko == 0 means it's a hole */ if (!disko) datao = 0; @@ -1683,14 +1677,21 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (comp && (skip || trim)) { ret = -EINVAL; + btrfs_end_transaction(trans, root); goto out; } size -= skip + trim; datal -= skip + trim; + + ret = btrfs_drop_extents(trans, inode, + new_key.offset, + new_key.offset + datal, + &hint_byte, 1); + BUG_ON(ret); + ret = btrfs_insert_empty_item(trans, root, path, &new_key, size); - if (ret) - goto out; + BUG_ON(ret); if (skip) { u32 start = @@ -1708,8 +1709,17 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, } btrfs_mark_buffer_dirty(leaf); - } + btrfs_release_path(root, path); + inode->i_mtime = inode->i_ctime = CURRENT_TIME; + if (new_key.offset + datal > inode->i_size) + btrfs_i_size_write(inode, + new_key.offset + datal); + BTRFS_I(inode)->flags = BTRFS_I(src)->flags; + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + btrfs_end_transaction(trans, root); + } next: btrfs_release_path(root, path); key.offset++; @@ -1717,17 +1727,7 @@ next: ret = 0; out: btrfs_release_path(root, path); - if (ret == 0) { - inode->i_mtime = inode->i_ctime = CURRENT_TIME; - if (destoff + olen > inode->i_size) - btrfs_i_size_write(inode, destoff + olen); - BTRFS_I(inode)->flags = BTRFS_I(src)->flags; - ret = btrfs_update_inode(trans, root, inode); - } - btrfs_end_transaction(trans, root); unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); - if (ret) - vmtruncate(inode, 0); out_unlock: mutex_unlock(&src->i_mutex); mutex_unlock(&inode->i_mutex); diff 
--git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index a127c0ebb2dc..e56c72bc5add 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -124,6 +124,15 @@ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) return 1; } +static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset, + u64 len) +{ + if (file_offset + len <= entry->file_offset || + entry->file_offset + entry->len <= file_offset) + return 0; + return 1; +} + /* * look find the first ordered struct that has this offset, otherwise * the first one less than this offset @@ -161,8 +170,9 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, * The tree is given a single reference on the ordered extent that was * inserted. */ -int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int type) +static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, + int type, int dio) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -182,6 +192,9 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); + if (dio) + set_bit(BTRFS_ORDERED_DIRECT, &entry->flags); + /* one ref for the tree */ atomic_set(&entry->refs, 1); init_waitqueue_head(&entry->wait); @@ -203,6 +216,20 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, return 0; } +int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, int type) +{ + return __btrfs_add_ordered_extent(inode, file_offset, start, len, + disk_len, type, 0); +} + +int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, int type) +{ + return __btrfs_add_ordered_extent(inode, file_offset, start, len, + disk_len, type, 1); +} + /* * Add a struct btrfs_ordered_sum into the list of checksums to be inserted * when an ordered extent is finished. If the list covers more than one @@ -311,13 +338,6 @@ static int __btrfs_remove_ordered_extent(struct inode *inode, tree->last = NULL; set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); - spin_lock(&BTRFS_I(inode)->accounting_lock); - WARN_ON(!BTRFS_I(inode)->outstanding_extents); - BTRFS_I(inode)->outstanding_extents--; - spin_unlock(&BTRFS_I(inode)->accounting_lock); - btrfs_unreserve_metadata_for_delalloc(BTRFS_I(inode)->root, - inode, 1); - spin_lock(&root->fs_info->ordered_extent_lock); list_del_init(&entry->root_extent_list); @@ -491,7 +511,8 @@ void btrfs_start_ordered_extent(struct inode *inode, * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ - filemap_fdatawrite_range(inode->i_mapping, start, end); + if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) + filemap_fdatawrite_range(inode->i_mapping, start, end); if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); @@ -588,6 +609,47 @@ out: return entry; } +/* Since the DIO code tries to lock a wide area we need to look for any ordered + * extents that exist in the range, rather than just the start of the range. 
+ */ +struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, + u64 file_offset, + u64 len) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; + spin_lock(&tree->lock); + node = tree_search(tree, file_offset); + if (!node) { + node = tree_search(tree, file_offset + len); + if (!node) + goto out; + } + + while (1) { + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (range_overlaps(entry, file_offset, len)) + break; + + if (entry->file_offset >= file_offset + len) { + entry = NULL; + break; + } + entry = NULL; + node = rb_next(node); + if (!node) + break; + } +out: + if (entry) + atomic_inc(&entry->refs); + spin_unlock(&tree->lock); + return entry; +} + /* * lookup and return any extent before 'file_offset'. NULL is returned * if none is found diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index c82f76a9f040..8ac365492a3f 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -72,6 +72,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ +#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -140,7 +142,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int tyep); + u64 start, u64 len, u64 disk_len, int type); +int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); @@ -151,6 +155,9 @@ void btrfs_start_ordered_extent(struct inode *inode, int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); +struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, + u64 file_offset, + u64 len); int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, struct btrfs_ordered_extent *ordered); int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index e558dd941ded..05d41e569236 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -44,8 +44,12 @@ struct tree_entry { struct backref_node { struct rb_node rb_node; u64 bytenr; - /* objectid tree block owner */ + + u64 new_bytenr; + /* objectid of tree block owner, can be not uptodate */ u64 owner; + /* link to pending, changed or detached list */ + struct list_head list; /* list of upper level blocks reference this block */ struct list_head upper; /* list of child blocks in the cache */ @@ -56,9 +60,9 @@ struct backref_node { struct extent_buffer *eb; /* level of tree block */ unsigned int level:8; - /* 1 if the block is root of old snapshot */ - unsigned int old_root:1; - /* 1 if no child blocks in the cache */ + /* is the block in non-reference counted tree */ + unsigned int cowonly:1; + /* 1 if no child node in the cache */ unsigned int lowest:1; /* is the extent buffer locked */ unsigned int locked:1; @@ -66,6 +70,16 @@ struct backref_node { unsigned int processed:1; /* have backrefs of this block been checked */ unsigned int 
checked:1; + /* + * 1 if corresponding block has been cowed but some upper + * level block pointers may not point to the new location + */ + unsigned int pending:1; + /* + * 1 if the backref node isn't connected to any other + * backref node. + */ + unsigned int detached:1; }; /* @@ -74,7 +88,6 @@ struct backref_node { struct backref_edge { struct list_head list[2]; struct backref_node *node[2]; - u64 blockptr; }; #define LOWER 0 @@ -83,9 +96,25 @@ struct backref_edge { struct backref_cache { /* red black tree of all backref nodes in the cache */ struct rb_root rb_root; - /* list of backref nodes with no child block in the cache */ + /* for passing backref nodes to btrfs_reloc_cow_block */ + struct backref_node *path[BTRFS_MAX_LEVEL]; + /* + * list of blocks that have been cowed but some block + * pointers in upper level blocks may not reflect the + * new location + */ struct list_head pending[BTRFS_MAX_LEVEL]; - spinlock_t lock; + /* list of backref nodes with no child node */ + struct list_head leaves; + /* list of blocks that have been cowed in current transaction */ + struct list_head changed; + /* list of detached backref node. */ + struct list_head detached; + + u64 last_trans; + + int nr_nodes; + int nr_edges; }; /* @@ -113,15 +142,6 @@ struct tree_block { unsigned int key_ready:1; }; -/* inode vector */ -#define INODEVEC_SIZE 16 - -struct inodevec { - struct list_head list; - struct inode *inode[INODEVEC_SIZE]; - int nr; -}; - #define MAX_EXTENTS 128 struct file_extent_cluster { @@ -138,36 +158,43 @@ struct reloc_control { struct btrfs_root *extent_root; /* inode for moving data */ struct inode *data_inode; - struct btrfs_workers workers; + + struct btrfs_block_rsv *block_rsv; + + struct backref_cache backref_cache; + + struct file_extent_cluster cluster; /* tree blocks have been processed */ struct extent_io_tree processed_blocks; /* map start of tree root to corresponding reloc tree */ struct mapping_tree reloc_root_tree; /* list of reloc trees */ struct list_head reloc_roots; + /* size of metadata reservation for merging reloc trees */ + u64 merging_rsv_size; + /* size of relocated tree nodes */ + u64 nodes_relocated; + u64 search_start; u64 extents_found; - u64 extents_skipped; - int stage; - int create_reloc_root; + + int block_rsv_retries; + + unsigned int stage:8; + unsigned int create_reloc_tree:1; + unsigned int merge_reloc_tree:1; unsigned int found_file_extent:1; - unsigned int found_old_snapshot:1; + unsigned int commit_transaction:1; }; /* stages of data relocation */ #define MOVE_DATA_EXTENTS 0 #define UPDATE_DATA_PTRS 1 -/* - * merge reloc tree to corresponding fs tree in worker threads - */ -struct async_merge { - struct btrfs_work work; - struct reloc_control *rc; - struct btrfs_root *root; - struct completion *done; - atomic_t *num_pending; -}; +static void remove_backref_node(struct backref_cache *cache, + struct backref_node *node); +static void __mark_block_processed(struct reloc_control *rc, + struct backref_node *node); static void mapping_tree_init(struct mapping_tree *tree) { @@ -181,15 +208,80 @@ static void backref_cache_init(struct backref_cache *cache) cache->rb_root = RB_ROOT; for (i = 0; i < BTRFS_MAX_LEVEL; i++) INIT_LIST_HEAD(&cache->pending[i]); - spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->changed); + INIT_LIST_HEAD(&cache->detached); + INIT_LIST_HEAD(&cache->leaves); +} + +static void backref_cache_cleanup(struct backref_cache *cache) +{ + struct backref_node *node; + int i; + + while (!list_empty(&cache->detached)) { + node = 
list_entry(cache->detached.next, + struct backref_node, list); + remove_backref_node(cache, node); + } + + while (!list_empty(&cache->leaves)) { + node = list_entry(cache->leaves.next, + struct backref_node, lower); + remove_backref_node(cache, node); + } + + cache->last_trans = 0; + + for (i = 0; i < BTRFS_MAX_LEVEL; i++) + BUG_ON(!list_empty(&cache->pending[i])); + BUG_ON(!list_empty(&cache->changed)); + BUG_ON(!list_empty(&cache->detached)); + BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root)); + BUG_ON(cache->nr_nodes); + BUG_ON(cache->nr_edges); +} + +static struct backref_node *alloc_backref_node(struct backref_cache *cache) +{ + struct backref_node *node; + + node = kzalloc(sizeof(*node), GFP_NOFS); + if (node) { + INIT_LIST_HEAD(&node->list); + INIT_LIST_HEAD(&node->upper); + INIT_LIST_HEAD(&node->lower); + RB_CLEAR_NODE(&node->rb_node); + cache->nr_nodes++; + } + return node; +} + +static void free_backref_node(struct backref_cache *cache, + struct backref_node *node) +{ + if (node) { + cache->nr_nodes--; + kfree(node); + } +} + +static struct backref_edge *alloc_backref_edge(struct backref_cache *cache) +{ + struct backref_edge *edge; + + edge = kzalloc(sizeof(*edge), GFP_NOFS); + if (edge) + cache->nr_edges++; + return edge; } -static void backref_node_init(struct backref_node *node) +static void free_backref_edge(struct backref_cache *cache, + struct backref_edge *edge) { - memset(node, 0, sizeof(*node)); - INIT_LIST_HEAD(&node->upper); - INIT_LIST_HEAD(&node->lower); - RB_CLEAR_NODE(&node->rb_node); + if (edge) { + cache->nr_edges--; + kfree(edge); + } } static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, @@ -250,6 +342,7 @@ static struct backref_node *walk_up_backref(struct backref_node *node, edges[idx++] = edge; node = edge->node[UPPER]; } + BUG_ON(node->detached); *index = idx; return node; } @@ -281,13 +374,18 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[], return NULL; } +static void unlock_node_buffer(struct backref_node *node) +{ + if (node->locked) { + btrfs_tree_unlock(node->eb); + node->locked = 0; + } +} + static void drop_node_buffer(struct backref_node *node) { if (node->eb) { - if (node->locked) { - btrfs_tree_unlock(node->eb); - node->locked = 0; - } + unlock_node_buffer(node); free_extent_buffer(node->eb); node->eb = NULL; } @@ -296,14 +394,14 @@ static void drop_node_buffer(struct backref_node *node) static void drop_backref_node(struct backref_cache *tree, struct backref_node *node) { - BUG_ON(!node->lowest); BUG_ON(!list_empty(&node->upper)); drop_node_buffer(node); + list_del(&node->list); list_del(&node->lower); - - rb_erase(&node->rb_node, &tree->rb_root); - kfree(node); + if (!RB_EMPTY_NODE(&node->rb_node)) + rb_erase(&node->rb_node, &tree->rb_root); + free_backref_node(tree, node); } /* @@ -318,27 +416,121 @@ static void remove_backref_node(struct backref_cache *cache, if (!node) return; - BUG_ON(!node->lowest); + BUG_ON(!node->lowest && !node->detached); while (!list_empty(&node->upper)) { edge = list_entry(node->upper.next, struct backref_edge, list[LOWER]); upper = edge->node[UPPER]; list_del(&edge->list[LOWER]); list_del(&edge->list[UPPER]); - kfree(edge); + free_backref_edge(cache, edge); + + if (RB_EMPTY_NODE(&upper->rb_node)) { + BUG_ON(!list_empty(&node->upper)); + drop_backref_node(cache, node); + node = upper; + node->lowest = 1; + continue; + } /* - * add the node to pending list if no other + * add the node to leaf node list if no other * child block cached. 
*/ if (list_empty(&upper->lower)) { - list_add_tail(&upper->lower, - &cache->pending[upper->level]); + list_add_tail(&upper->lower, &cache->leaves); upper->lowest = 1; } } + drop_backref_node(cache, node); } +static void update_backref_node(struct backref_cache *cache, + struct backref_node *node, u64 bytenr) +{ + struct rb_node *rb_node; + rb_erase(&node->rb_node, &cache->rb_root); + node->bytenr = bytenr; + rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); + BUG_ON(rb_node); +} + +/* + * update backref cache after a transaction commit + */ +static int update_backref_cache(struct btrfs_trans_handle *trans, + struct backref_cache *cache) +{ + struct backref_node *node; + int level = 0; + + if (cache->last_trans == 0) { + cache->last_trans = trans->transid; + return 0; + } + + if (cache->last_trans == trans->transid) + return 0; + + /* + * detached nodes are used to avoid unnecessary backref + * lookup. transaction commit changes the extent tree. + * so the detached nodes are no longer useful. + */ + while (!list_empty(&cache->detached)) { + node = list_entry(cache->detached.next, + struct backref_node, list); + remove_backref_node(cache, node); + } + + while (!list_empty(&cache->changed)) { + node = list_entry(cache->changed.next, + struct backref_node, list); + list_del_init(&node->list); + BUG_ON(node->pending); + update_backref_node(cache, node, node->new_bytenr); + } + + /* + * some nodes can be left in the pending list if there were + * errors during processing the pending nodes. + */ + for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + list_for_each_entry(node, &cache->pending[level], list) { + BUG_ON(!node->pending); + if (node->bytenr == node->new_bytenr) + continue; + update_backref_node(cache, node, node->new_bytenr); + } + } + + cache->last_trans = 0; + return 1; +} + +static int should_ignore_root(struct btrfs_root *root) +{ + struct btrfs_root *reloc_root; + + if (!root->ref_cows) + return 0; + + reloc_root = root->reloc_root; + if (!reloc_root) + return 0; + + if (btrfs_root_last_snapshot(&reloc_root->root_item) == + root->fs_info->running_transaction->transid - 1) + return 0; + /* + * if there is reloc tree and it was created in previous + * transaction backref lookup can find the reloc tree, + * so backref node for the fs tree root is useless for + * relocation. + */ + return 1; +} + /* * find reloc tree by address of tree root */ @@ -453,11 +645,12 @@ int find_inline_backref(struct extent_buffer *leaf, int slot, * for all upper level blocks that directly/indirectly reference the * block are also cached. 
*/ -static struct backref_node *build_backref_tree(struct reloc_control *rc, - struct backref_cache *cache, - struct btrfs_key *node_key, - int level, u64 bytenr) +static noinline_for_stack +struct backref_node *build_backref_tree(struct reloc_control *rc, + struct btrfs_key *node_key, + int level, u64 bytenr) { + struct backref_cache *cache = &rc->backref_cache; struct btrfs_path *path1; struct btrfs_path *path2; struct extent_buffer *eb; @@ -473,6 +666,8 @@ static struct backref_node *build_backref_tree(struct reloc_control *rc, unsigned long end; unsigned long ptr; LIST_HEAD(list); + LIST_HEAD(useless); + int cowonly; int ret; int err = 0; @@ -483,15 +678,13 @@ static struct backref_node *build_backref_tree(struct reloc_control *rc, goto out; } - node = kmalloc(sizeof(*node), GFP_NOFS); + node = alloc_backref_node(cache); if (!node) { err = -ENOMEM; goto out; } - backref_node_init(node); node->bytenr = bytenr; - node->owner = 0; node->level = level; node->lowest = 1; cur = node; @@ -587,17 +780,20 @@ again: #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY || key.type == BTRFS_EXTENT_REF_V0_KEY) { - if (key.objectid == key.offset && - key.type == BTRFS_EXTENT_REF_V0_KEY) { + if (key.type == BTRFS_EXTENT_REF_V0_KEY) { struct btrfs_extent_ref_v0 *ref0; ref0 = btrfs_item_ptr(eb, path1->slots[0], struct btrfs_extent_ref_v0); root = find_tree_root(rc, eb, ref0); - if (root) - cur->root = root; - else - cur->old_root = 1; - break; + if (!root->ref_cows) + cur->cowonly = 1; + if (key.objectid == key.offset) { + if (root && !should_ignore_root(root)) + cur->root = root; + else + list_add(&cur->list, &useless); + break; + } } #else BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); @@ -614,22 +810,20 @@ again: break; } - edge = kzalloc(sizeof(*edge), GFP_NOFS); + edge = alloc_backref_edge(cache); if (!edge) { err = -ENOMEM; goto out; } rb_node = tree_search(&cache->rb_root, key.offset); if (!rb_node) { - upper = kmalloc(sizeof(*upper), GFP_NOFS); + upper = alloc_backref_node(cache); if (!upper) { - kfree(edge); + free_backref_edge(cache, edge); err = -ENOMEM; goto out; } - backref_node_init(upper); upper->bytenr = key.offset; - upper->owner = 0; upper->level = cur->level + 1; /* * backrefs for the upper level block isn't @@ -639,11 +833,12 @@ again: } else { upper = rb_entry(rb_node, struct backref_node, rb_node); + BUG_ON(!upper->checked); INIT_LIST_HEAD(&edge->list[UPPER]); } - list_add(&edge->list[LOWER], &cur->upper); - edge->node[UPPER] = upper; + list_add_tail(&edge->list[LOWER], &cur->upper); edge->node[LOWER] = cur; + edge->node[UPPER] = upper; goto next; } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { @@ -657,11 +852,17 @@ again: goto out; } + if (!root->ref_cows) + cur->cowonly = 1; + if (btrfs_root_level(&root->root_item) == cur->level) { /* tree root */ BUG_ON(btrfs_root_bytenr(&root->root_item) != cur->bytenr); - cur->root = root; + if (should_ignore_root(root)) + list_add(&cur->list, &useless); + else + cur->root = root; break; } @@ -692,11 +893,14 @@ again: if (!path2->nodes[level]) { BUG_ON(btrfs_root_bytenr(&root->root_item) != lower->bytenr); - lower->root = root; + if (should_ignore_root(root)) + list_add(&lower->list, &useless); + else + lower->root = root; break; } - edge = kzalloc(sizeof(*edge), GFP_NOFS); + edge = alloc_backref_edge(cache); if (!edge) { err = -ENOMEM; goto out; @@ -705,16 +909,17 @@ again: eb = path2->nodes[level]; rb_node = tree_search(&cache->rb_root, eb->start); if (!rb_node) { - upper = kmalloc(sizeof(*upper), GFP_NOFS); + 
upper = alloc_backref_node(cache); if (!upper) { - kfree(edge); + free_backref_edge(cache, edge); err = -ENOMEM; goto out; } - backref_node_init(upper); upper->bytenr = eb->start; upper->owner = btrfs_header_owner(eb); upper->level = lower->level + 1; + if (!root->ref_cows) + upper->cowonly = 1; /* * if we know the block isn't shared @@ -744,10 +949,12 @@ again: rb_node); BUG_ON(!upper->checked); INIT_LIST_HEAD(&edge->list[UPPER]); + if (!upper->owner) + upper->owner = btrfs_header_owner(eb); } list_add_tail(&edge->list[LOWER], &lower->upper); - edge->node[UPPER] = upper; edge->node[LOWER] = lower; + edge->node[UPPER] = upper; if (rb_node) break; @@ -785,8 +992,13 @@ next: * into the cache. */ BUG_ON(!node->checked); - rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); - BUG_ON(rb_node); + cowonly = node->cowonly; + if (!cowonly) { + rb_node = tree_insert(&cache->rb_root, node->bytenr, + &node->rb_node); + BUG_ON(rb_node); + list_add_tail(&node->lower, &cache->leaves); + } list_for_each_entry(edge, &node->upper, list[LOWER]) list_add_tail(&edge->list[UPPER], &list); @@ -795,6 +1007,14 @@ next: edge = list_entry(list.next, struct backref_edge, list[UPPER]); list_del_init(&edge->list[UPPER]); upper = edge->node[UPPER]; + if (upper->detached) { + list_del(&edge->list[LOWER]); + lower = edge->node[LOWER]; + free_backref_edge(cache, edge); + if (list_empty(&lower->upper)) + list_add(&lower->list, &useless); + continue; + } if (!RB_EMPTY_NODE(&upper->rb_node)) { if (upper->lowest) { @@ -807,25 +1027,69 @@ next: } BUG_ON(!upper->checked); - rb_node = tree_insert(&cache->rb_root, upper->bytenr, - &upper->rb_node); - BUG_ON(rb_node); + BUG_ON(cowonly != upper->cowonly); + if (!cowonly) { + rb_node = tree_insert(&cache->rb_root, upper->bytenr, + &upper->rb_node); + BUG_ON(rb_node); + } list_add_tail(&edge->list[UPPER], &upper->lower); list_for_each_entry(edge, &upper->upper, list[LOWER]) list_add_tail(&edge->list[UPPER], &list); } + /* + * process useless backref nodes. backref nodes for tree leaves + * are deleted from the cache. backref nodes for upper level + * tree blocks are left in the cache to avoid unnecessary backref + * lookup. 
+ */ + while (!list_empty(&useless)) { + upper = list_entry(useless.next, struct backref_node, list); + list_del_init(&upper->list); + BUG_ON(!list_empty(&upper->upper)); + if (upper == node) + node = NULL; + if (upper->lowest) { + list_del_init(&upper->lower); + upper->lowest = 0; + } + while (!list_empty(&upper->lower)) { + edge = list_entry(upper->lower.next, + struct backref_edge, list[UPPER]); + list_del(&edge->list[UPPER]); + list_del(&edge->list[LOWER]); + lower = edge->node[LOWER]; + free_backref_edge(cache, edge); + + if (list_empty(&lower->upper)) + list_add(&lower->list, &useless); + } + __mark_block_processed(rc, upper); + if (upper->level > 0) { + list_add(&upper->list, &cache->detached); + upper->detached = 1; + } else { + rb_erase(&upper->rb_node, &cache->rb_root); + free_backref_node(cache, upper); + } + } out: btrfs_free_path(path1); btrfs_free_path(path2); if (err) { - INIT_LIST_HEAD(&list); + while (!list_empty(&useless)) { + lower = list_entry(useless.next, + struct backref_node, upper); + list_del_init(&lower->upper); + } upper = node; + INIT_LIST_HEAD(&list); while (upper) { if (RB_EMPTY_NODE(&upper->rb_node)) { list_splice_tail(&upper->upper, &list); - kfree(upper); + free_backref_node(cache, upper); } if (list_empty(&list)) @@ -833,14 +1097,103 @@ out: edge = list_entry(list.next, struct backref_edge, list[LOWER]); + list_del(&edge->list[LOWER]); upper = edge->node[UPPER]; - kfree(edge); + free_backref_edge(cache, edge); } return ERR_PTR(err); } + BUG_ON(node && node->detached); return node; } +/* + * helper to add backref node for the newly created snapshot. + * the backref node is created by cloning backref node that + * corresponds to root of source tree + */ +static int clone_backref_node(struct btrfs_trans_handle *trans, + struct reloc_control *rc, + struct btrfs_root *src, + struct btrfs_root *dest) +{ + struct btrfs_root *reloc_root = src->reloc_root; + struct backref_cache *cache = &rc->backref_cache; + struct backref_node *node = NULL; + struct backref_node *new_node; + struct backref_edge *edge; + struct backref_edge *new_edge; + struct rb_node *rb_node; + + if (cache->last_trans > 0) + update_backref_cache(trans, cache); + + rb_node = tree_search(&cache->rb_root, src->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct backref_node, rb_node); + if (node->detached) + node = NULL; + else + BUG_ON(node->new_bytenr != reloc_root->node->start); + } + + if (!node) { + rb_node = tree_search(&cache->rb_root, + reloc_root->commit_root->start); + if (rb_node) { + node = rb_entry(rb_node, struct backref_node, + rb_node); + BUG_ON(node->detached); + } + } + + if (!node) + return 0; + + new_node = alloc_backref_node(cache); + if (!new_node) + return -ENOMEM; + + new_node->bytenr = dest->node->start; + new_node->level = node->level; + new_node->lowest = node->lowest; + new_node->root = dest; + + if (!node->lowest) { + list_for_each_entry(edge, &node->lower, list[UPPER]) { + new_edge = alloc_backref_edge(cache); + if (!new_edge) + goto fail; + + new_edge->node[UPPER] = new_node; + new_edge->node[LOWER] = edge->node[LOWER]; + list_add_tail(&new_edge->list[UPPER], + &new_node->lower); + } + } + + rb_node = tree_insert(&cache->rb_root, new_node->bytenr, + &new_node->rb_node); + BUG_ON(rb_node); + + if (!new_node->lowest) { + list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) { + list_add_tail(&new_edge->list[LOWER], + &new_edge->node[LOWER]->upper); + } + } + return 0; +fail: + while (!list_empty(&new_node->lower)) { + new_edge = 
list_entry(new_node->lower.next, + struct backref_edge, list[UPPER]); + list_del(&new_edge->list[UPPER]); + free_backref_edge(cache, new_edge); + } + free_backref_node(cache, new_node); + return -ENOMEM; +} + /* * helper to add 'address of tree root -> reloc tree' mapping */ @@ -901,12 +1254,8 @@ static int __update_reloc_root(struct btrfs_root *root, int del) return 0; } -/* - * create reloc tree for a given fs tree. reloc tree is just a - * snapshot of the fs tree with special root objectid. - */ -int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 objectid) { struct btrfs_root *reloc_root; struct extent_buffer *eb; @@ -914,36 +1263,45 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, struct btrfs_key root_key; int ret; - if (root->reloc_root) { - reloc_root = root->reloc_root; - reloc_root->last_trans = trans->transid; - return 0; - } - - if (!root->fs_info->reloc_ctl || - !root->fs_info->reloc_ctl->create_reloc_root || - root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) - return 0; - root_item = kmalloc(sizeof(*root_item), GFP_NOFS); BUG_ON(!root_item); root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; root_key.type = BTRFS_ROOT_ITEM_KEY; - root_key.offset = root->root_key.objectid; + root_key.offset = objectid; - ret = btrfs_copy_root(trans, root, root->commit_root, &eb, - BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(ret); + if (root->root_key.objectid == objectid) { + /* called by btrfs_init_reloc_root */ + ret = btrfs_copy_root(trans, root, root->commit_root, &eb, + BTRFS_TREE_RELOC_OBJECTID); + BUG_ON(ret); + + btrfs_set_root_last_snapshot(&root->root_item, + trans->transid - 1); + } else { + /* + * called by btrfs_reloc_post_snapshot_hook. + * the source tree is a reloc tree, all tree blocks + * modified after it was created have RELOC flag + * set in their headers. so it's OK to not update + * the 'last_snapshot'. + */ + ret = btrfs_copy_root(trans, root, root->node, &eb, + BTRFS_TREE_RELOC_OBJECTID); + BUG_ON(ret); + } - btrfs_set_root_last_snapshot(&root->root_item, trans->transid - 1); memcpy(root_item, &root->root_item, sizeof(*root_item)); - btrfs_set_root_refs(root_item, 1); btrfs_set_root_bytenr(root_item, eb->start); btrfs_set_root_level(root_item, btrfs_header_level(eb)); btrfs_set_root_generation(root_item, trans->transid); - memset(&root_item->drop_progress, 0, sizeof(struct btrfs_disk_key)); - root_item->drop_level = 0; + + if (root->root_key.objectid == objectid) { + btrfs_set_root_refs(root_item, 0); + memset(&root_item->drop_progress, 0, + sizeof(struct btrfs_disk_key)); + root_item->drop_level = 0; + } btrfs_tree_unlock(eb); free_extent_buffer(eb); @@ -957,6 +1315,37 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, &root_key); BUG_ON(IS_ERR(reloc_root)); reloc_root->last_trans = trans->transid; + return reloc_root; +} + +/* + * create reloc tree for a given fs tree. reloc tree is just a + * snapshot of the fs tree with special root objectid. 
+ */ +int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_root *reloc_root; + struct reloc_control *rc = root->fs_info->reloc_ctl; + int clear_rsv = 0; + + if (root->reloc_root) { + reloc_root = root->reloc_root; + reloc_root->last_trans = trans->transid; + return 0; + } + + if (!rc || !rc->create_reloc_tree || + root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) + return 0; + + if (!trans->block_rsv) { + trans->block_rsv = rc->block_rsv; + clear_rsv = 1; + } + reloc_root = create_reloc_root(trans, root, root->root_key.objectid); + if (clear_rsv) + trans->block_rsv = NULL; __add_reloc_root(reloc_root); root->reloc_root = reloc_root; @@ -980,7 +1369,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, reloc_root = root->reloc_root; root_item = &reloc_root->root_item; - if (btrfs_root_refs(root_item) == 0) { + if (root->fs_info->reloc_ctl->merge_reloc_tree && + btrfs_root_refs(root_item) == 0) { root->reloc_root = NULL; del = 1; } @@ -1102,8 +1492,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, goto out; } - if (new_bytenr) - *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); + *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); ret = 0; out: btrfs_free_path(path); @@ -1114,19 +1503,18 @@ out: * update file extent items in the tree leaf to point to * the new locations. */ -static int replace_file_extents(struct btrfs_trans_handle *trans, - struct reloc_control *rc, - struct btrfs_root *root, - struct extent_buffer *leaf, - struct list_head *inode_list) +static noinline_for_stack +int replace_file_extents(struct btrfs_trans_handle *trans, + struct reloc_control *rc, + struct btrfs_root *root, + struct extent_buffer *leaf) { struct btrfs_key key; struct btrfs_file_extent_item *fi; struct inode *inode = NULL; - struct inodevec *ivec = NULL; u64 parent; u64 bytenr; - u64 new_bytenr; + u64 new_bytenr = 0; u64 num_bytes; u64 end; u32 nritems; @@ -1166,21 +1554,12 @@ static int replace_file_extents(struct btrfs_trans_handle *trans, * to complete and drop the extent cache */ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { - if (!ivec || ivec->nr == INODEVEC_SIZE) { - ivec = kmalloc(sizeof(*ivec), GFP_NOFS); - BUG_ON(!ivec); - ivec->nr = 0; - list_add_tail(&ivec->list, inode_list); - } if (first) { inode = find_next_inode(root, key.objectid); - if (inode) - ivec->inode[ivec->nr++] = inode; first = 0; } else if (inode && inode->i_ino < key.objectid) { + btrfs_add_delayed_iput(inode); inode = find_next_inode(root, key.objectid); - if (inode) - ivec->inode[ivec->nr++] = inode; } if (inode && inode->i_ino == key.objectid) { end = key.offset + @@ -1204,8 +1583,10 @@ static int replace_file_extents(struct btrfs_trans_handle *trans, ret = get_new_location(rc->data_inode, &new_bytenr, bytenr, num_bytes); - if (ret > 0) + if (ret > 0) { + WARN_ON(1); continue; + } BUG_ON(ret < 0); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr); @@ -1225,6 +1606,8 @@ static int replace_file_extents(struct btrfs_trans_handle *trans, } if (dirty) btrfs_mark_buffer_dirty(leaf); + if (inode) + btrfs_add_delayed_iput(inode); return 0; } @@ -1248,11 +1631,11 @@ int memcmp_node_keys(struct extent_buffer *eb, int slot, * if no block got replaced, 0 is returned. if there are other * errors, a negative error number is returned. 
*/ -static int replace_path(struct btrfs_trans_handle *trans, - struct btrfs_root *dest, struct btrfs_root *src, - struct btrfs_path *path, struct btrfs_key *next_key, - struct extent_buffer **leaf, - int lowest_level, int max_level) +static noinline_for_stack +int replace_path(struct btrfs_trans_handle *trans, + struct btrfs_root *dest, struct btrfs_root *src, + struct btrfs_path *path, struct btrfs_key *next_key, + int lowest_level, int max_level) { struct extent_buffer *eb; struct extent_buffer *parent; @@ -1263,16 +1646,16 @@ static int replace_path(struct btrfs_trans_handle *trans, u64 new_ptr_gen; u64 last_snapshot; u32 blocksize; + int cow = 0; int level; int ret; int slot; BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(lowest_level > 1 && leaf); last_snapshot = btrfs_root_last_snapshot(&src->root_item); - +again: slot = path->slots[lowest_level]; btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot); @@ -1286,8 +1669,10 @@ static int replace_path(struct btrfs_trans_handle *trans, return 0; } - ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); - BUG_ON(ret); + if (cow) { + ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); + BUG_ON(ret); + } btrfs_set_lock_blocking(eb); if (next_key) { @@ -1331,7 +1716,7 @@ static int replace_path(struct btrfs_trans_handle *trans, if (new_bytenr == 0 || old_ptr_gen > last_snapshot || memcmp_node_keys(parent, slot, path, level)) { - if (level <= lowest_level && !leaf) { + if (level <= lowest_level) { ret = 0; break; } @@ -1339,16 +1724,12 @@ static int replace_path(struct btrfs_trans_handle *trans, eb = read_tree_block(dest, old_bytenr, blocksize, old_ptr_gen); btrfs_tree_lock(eb); - ret = btrfs_cow_block(trans, dest, eb, parent, - slot, &eb); - BUG_ON(ret); - btrfs_set_lock_blocking(eb); - - if (level <= lowest_level) { - *leaf = eb; - ret = 0; - break; + if (cow) { + ret = btrfs_cow_block(trans, dest, eb, parent, + slot, &eb); + BUG_ON(ret); } + btrfs_set_lock_blocking(eb); btrfs_tree_unlock(parent); free_extent_buffer(parent); @@ -1357,6 +1738,13 @@ static int replace_path(struct btrfs_trans_handle *trans, continue; } + if (!cow) { + btrfs_tree_unlock(parent); + free_extent_buffer(parent); + cow = 1; + goto again; + } + btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); btrfs_release_path(src, path); @@ -1562,20 +1950,6 @@ static int invalidate_extent_cache(struct btrfs_root *root, return 0; } -static void put_inodes(struct list_head *list) -{ - struct inodevec *ivec; - while (!list_empty(list)) { - ivec = list_entry(list->next, struct inodevec, list); - list_del(&ivec->list); - while (ivec->nr > 0) { - ivec->nr--; - iput(ivec->inode[ivec->nr]); - } - kfree(ivec); - } -} - static int find_next_key(struct btrfs_path *path, int level, struct btrfs_key *key) @@ -1608,13 +1982,14 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, struct btrfs_root *reloc_root; struct btrfs_root_item *root_item; struct btrfs_path *path; - struct extent_buffer *leaf = NULL; + struct extent_buffer *leaf; unsigned long nr; int level; int max_level; int replaced = 0; int ret; int err = 0; + u32 min_reserved; path = btrfs_alloc_path(); if (!path) @@ -1648,34 +2023,23 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, btrfs_unlock_up_safe(path, 0); } - if (level == 0 && rc->stage == UPDATE_DATA_PTRS) { - trans = btrfs_start_transaction(root, 1); + min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; + 
memset(&next_key, 0, sizeof(next_key)); - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &key, 0); - btrfs_release_path(reloc_root, path); + while (1) { + trans = btrfs_start_transaction(root, 0); + trans->block_rsv = rc->block_rsv; - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) { - err = ret; - goto out; + ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, + min_reserved, 0); + if (ret) { + BUG_ON(ret != -EAGAIN); + ret = btrfs_commit_transaction(trans, root); + BUG_ON(ret); + continue; } - leaf = path->nodes[0]; - btrfs_unlock_up_safe(path, 1); - ret = replace_file_extents(trans, rc, root, leaf, - &inode_list); - if (ret < 0) - err = ret; - goto out; - } - - memset(&next_key, 0, sizeof(next_key)); - - while (1) { - leaf = NULL; replaced = 0; - trans = btrfs_start_transaction(root, 1); max_level = level; ret = walk_down_reloc_tree(reloc_root, path, &level); @@ -1689,14 +2053,9 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, if (!find_next_key(path, level, &key) && btrfs_comp_cpu_keys(&next_key, &key) >= 0) { ret = 0; - } else if (level == 1 && rc->stage == UPDATE_DATA_PTRS) { - ret = replace_path(trans, root, reloc_root, - path, &next_key, &leaf, - level, max_level); } else { - ret = replace_path(trans, root, reloc_root, - path, &next_key, NULL, - level, max_level); + ret = replace_path(trans, root, reloc_root, path, + &next_key, level, max_level); } if (ret < 0) { err = ret; @@ -1708,16 +2067,6 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, btrfs_node_key_to_cpu(path->nodes[level], &key, path->slots[level]); replaced = 1; - } else if (leaf) { - /* - * no block got replaced, try replacing file extents - */ - btrfs_item_key_to_cpu(leaf, &key, 0); - ret = replace_file_extents(trans, rc, root, leaf, - &inode_list); - btrfs_tree_unlock(leaf); - free_extent_buffer(leaf); - BUG_ON(ret < 0); } ret = walk_up_reloc_tree(reloc_root, path, &level); @@ -1734,15 +2083,10 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, root_item->drop_level = level; nr = trans->blocks_used; - btrfs_end_transaction(trans, root); + btrfs_end_transaction_throttle(trans, root); btrfs_btree_balance_dirty(root, nr); - /* - * put inodes outside transaction, otherwise we may deadlock. - */ - put_inodes(&inode_list); - if (replaced && rc->stage == UPDATE_DATA_PTRS) invalidate_extent_cache(root, &key, &next_key); } @@ -1765,87 +2109,125 @@ out: sizeof(root_item->drop_progress)); root_item->drop_level = 0; btrfs_set_root_refs(root_item, 0); + btrfs_update_reloc_root(trans, root); } nr = trans->blocks_used; - btrfs_end_transaction(trans, root); + btrfs_end_transaction_throttle(trans, root); btrfs_btree_balance_dirty(root, nr); - put_inodes(&inode_list); - if (replaced && rc->stage == UPDATE_DATA_PTRS) invalidate_extent_cache(root, &key, &next_key); return err; } -/* - * callback for the work threads. - * this function merges reloc tree with corresponding fs tree, - * and then drops the reloc tree. 
- */ -static void merge_func(struct btrfs_work *work) +static noinline_for_stack +int prepare_to_merge(struct reloc_control *rc, int err) { - struct btrfs_trans_handle *trans; - struct btrfs_root *root; + struct btrfs_root *root = rc->extent_root; struct btrfs_root *reloc_root; - struct async_merge *async; + struct btrfs_trans_handle *trans; + LIST_HEAD(reloc_roots); + u64 num_bytes = 0; + int ret; + int retries = 0; + + mutex_lock(&root->fs_info->trans_mutex); + rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; + rc->merging_rsv_size += rc->nodes_relocated * 2; + mutex_unlock(&root->fs_info->trans_mutex); +again: + if (!err) { + num_bytes = rc->merging_rsv_size; + ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv, + num_bytes, &retries); + if (ret) + err = ret; + } - async = container_of(work, struct async_merge, work); - reloc_root = async->root; + trans = btrfs_join_transaction(rc->extent_root, 1); + + if (!err) { + if (num_bytes != rc->merging_rsv_size) { + btrfs_end_transaction(trans, rc->extent_root); + btrfs_block_rsv_release(rc->extent_root, + rc->block_rsv, num_bytes); + retries = 0; + goto again; + } + } + + rc->merge_reloc_tree = 1; + + while (!list_empty(&rc->reloc_roots)) { + reloc_root = list_entry(rc->reloc_roots.next, + struct btrfs_root, root_list); + list_del_init(&reloc_root->root_list); - if (btrfs_root_refs(&reloc_root->root_item) > 0) { root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset); BUG_ON(IS_ERR(root)); BUG_ON(root->reloc_root != reloc_root); - merge_reloc_root(async->rc, root); - - trans = btrfs_start_transaction(root, 1); + /* + * set reference count to 1, so btrfs_recover_relocation + * knows it should resume merging + */ + if (!err) + btrfs_set_root_refs(&reloc_root->root_item, 1); btrfs_update_reloc_root(trans, root); - btrfs_end_transaction(trans, root); - } - btrfs_drop_snapshot(reloc_root, 0); + list_add(&reloc_root->root_list, &reloc_roots); + } - if (atomic_dec_and_test(async->num_pending)) - complete(async->done); + list_splice(&reloc_roots, &rc->reloc_roots); - kfree(async); + if (!err) + btrfs_commit_transaction(trans, rc->extent_root); + else + btrfs_end_transaction(trans, rc->extent_root); + return err; } -static int merge_reloc_roots(struct reloc_control *rc) +static noinline_for_stack +int merge_reloc_roots(struct reloc_control *rc) { - struct async_merge *async; struct btrfs_root *root; - struct completion done; - atomic_t num_pending; + struct btrfs_root *reloc_root; + LIST_HEAD(reloc_roots); + int found = 0; + int ret; +again: + root = rc->extent_root; + mutex_lock(&root->fs_info->trans_mutex); + list_splice_init(&rc->reloc_roots, &reloc_roots); + mutex_unlock(&root->fs_info->trans_mutex); - init_completion(&done); - atomic_set(&num_pending, 1); + while (!list_empty(&reloc_roots)) { + found = 1; + reloc_root = list_entry(reloc_roots.next, + struct btrfs_root, root_list); - while (!list_empty(&rc->reloc_roots)) { - root = list_entry(rc->reloc_roots.next, - struct btrfs_root, root_list); - list_del_init(&root->root_list); + if (btrfs_root_refs(&reloc_root->root_item) > 0) { + root = read_fs_root(reloc_root->fs_info, + reloc_root->root_key.offset); + BUG_ON(IS_ERR(root)); + BUG_ON(root->reloc_root != reloc_root); - async = kmalloc(sizeof(*async), GFP_NOFS); - BUG_ON(!async); - async->work.func = merge_func; - async->work.flags = 0; - async->rc = rc; - async->root = root; - async->done = &done; - async->num_pending = &num_pending; - atomic_inc(&num_pending); - btrfs_queue_worker(&rc->workers, &async->work);
+ ret = merge_reloc_root(rc, root); + BUG_ON(ret); + } else { + list_del_init(&reloc_root->root_list); + } + btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0); } - if (!atomic_dec_and_test(&num_pending)) - wait_for_completion(&done); - + if (found) { + found = 0; + goto again; + } BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root)); return 0; } @@ -1876,119 +2258,169 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans, return btrfs_record_root_in_trans(trans, root); } -/* - * select one tree from trees that references the block. - * for blocks in refernce counted trees, we preper reloc tree. - * if no reloc tree found and reloc_only is true, NULL is returned. - */ -static struct btrfs_root *__select_one_root(struct btrfs_trans_handle *trans, - struct backref_node *node, - struct backref_edge *edges[], - int *nr, int reloc_only) +static noinline_for_stack +struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, + struct reloc_control *rc, + struct backref_node *node, + struct backref_edge *edges[], int *nr) { struct backref_node *next; struct btrfs_root *root; - int index; - int loop = 0; -again: - index = 0; + int index = 0; + next = node; while (1) { cond_resched(); next = walk_up_backref(next, edges, &index); root = next->root; - if (!root) { - BUG_ON(!node->old_root); - goto skip; - } - - /* no other choice for non-refernce counted tree */ - if (!root->ref_cows) { - BUG_ON(reloc_only); - break; - } + BUG_ON(!root); + BUG_ON(!root->ref_cows); if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { record_reloc_root_in_trans(trans, root); break; } - if (loop) { - btrfs_record_root_in_trans(trans, root); + btrfs_record_root_in_trans(trans, root); + root = root->reloc_root; + + if (next->new_bytenr != root->node->start) { + BUG_ON(next->new_bytenr); + BUG_ON(!list_empty(&next->list)); + next->new_bytenr = root->node->start; + next->root = root; + list_add_tail(&next->list, + &rc->backref_cache.changed); + __mark_block_processed(rc, next); break; } - if (reloc_only || next != node) { - if (!root->reloc_root) - btrfs_record_root_in_trans(trans, root); - root = root->reloc_root; - /* - * if the reloc tree was created in current - * transation, there is no node in backref tree - * corresponds to the root of the reloc tree. - */ - if (btrfs_root_last_snapshot(&root->root_item) == - trans->transid - 1) - break; - } -skip: + WARN_ON(1); root = NULL; next = walk_down_backref(edges, &index); if (!next || next->level <= node->level) break; } + if (!root) + return NULL; - if (!root && !loop && !reloc_only) { - loop = 1; - goto again; + *nr = index; + next = node; + /* setup backref node path for btrfs_reloc_cow_block */ + while (1) { + rc->backref_cache.path[next->level] = next; + if (--index < 0) + break; + next = edges[index]->node[UPPER]; } - - if (root) - *nr = index; - else - *nr = 0; - return root; } +/* + * select a tree root for relocation. return NULL if the block + * is reference counted. we should use do_relocation() in this + * case. return a tree root pointer if the block isn't reference + * counted. return -ENOENT if the block is root of reloc tree. 
+ */ static noinline_for_stack struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans, struct backref_node *node) { + struct backref_node *next; + struct btrfs_root *root; + struct btrfs_root *fs_root = NULL; struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; - int nr; - return __select_one_root(trans, node, edges, &nr, 0); + int index = 0; + + next = node; + while (1) { + cond_resched(); + next = walk_up_backref(next, edges, &index); + root = next->root; + BUG_ON(!root); + + /* no other choice for non-refernce counted tree */ + if (!root->ref_cows) + return root; + + if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) + fs_root = root; + + if (next != node) + return NULL; + + next = walk_down_backref(edges, &index); + if (!next || next->level <= node->level) + break; + } + + if (!fs_root) + return ERR_PTR(-ENOENT); + return fs_root; } static noinline_for_stack -struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans, - struct backref_node *node, - struct backref_edge *edges[], int *nr) +u64 calcu_metadata_size(struct reloc_control *rc, + struct backref_node *node, int reserve) { - return __select_one_root(trans, node, edges, nr, 1); + struct backref_node *next = node; + struct backref_edge *edge; + struct backref_edge *edges[BTRFS_MAX_LEVEL - 1]; + u64 num_bytes = 0; + int index = 0; + + BUG_ON(reserve && node->processed); + + while (next) { + cond_resched(); + while (1) { + if (next->processed && (reserve || next != node)) + break; + + num_bytes += btrfs_level_size(rc->extent_root, + next->level); + + if (list_empty(&next->upper)) + break; + + edge = list_entry(next->upper.next, + struct backref_edge, list[LOWER]); + edges[index++] = edge; + next = edge->node[UPPER]; + } + next = walk_down_backref(edges, &index); + } + return num_bytes; } -static void grab_path_buffers(struct btrfs_path *path, - struct backref_node *node, - struct backref_edge *edges[], int nr) +static int reserve_metadata_space(struct btrfs_trans_handle *trans, + struct reloc_control *rc, + struct backref_node *node) { - int i = 0; - while (1) { - drop_node_buffer(node); - node->eb = path->nodes[node->level]; - BUG_ON(!node->eb); - if (path->locks[node->level]) - node->locked = 1; - path->nodes[node->level] = NULL; - path->locks[node->level] = 0; - - if (i >= nr) - break; + struct btrfs_root *root = rc->extent_root; + u64 num_bytes; + int ret; - edges[i]->blockptr = node->eb->start; - node = edges[i]->node[UPPER]; - i++; + num_bytes = calcu_metadata_size(rc, node, 1) * 2; + + trans->block_rsv = rc->block_rsv; + ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes, + &rc->block_rsv_retries); + if (ret) { + if (ret == -EAGAIN) + rc->commit_transaction = 1; + return ret; } + + rc->block_rsv_retries = 0; + return 0; +} + +static void release_metadata_space(struct reloc_control *rc, + struct backref_node *node) +{ + u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2; + btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes); } /* @@ -1999,6 +2431,7 @@ static void grab_path_buffers(struct btrfs_path *path, * in that case this function just updates pointers. 
*/ static int do_relocation(struct btrfs_trans_handle *trans, + struct reloc_control *rc, struct backref_node *node, struct btrfs_key *key, struct btrfs_path *path, int lowest) @@ -2019,18 +2452,25 @@ static int do_relocation(struct btrfs_trans_handle *trans, BUG_ON(lowest && node->eb); path->lowest_level = node->level + 1; + rc->backref_cache.path[node->level] = node; list_for_each_entry(edge, &node->upper, list[LOWER]) { cond_resched(); - if (node->eb && node->eb->start == edge->blockptr) - continue; upper = edge->node[UPPER]; - root = select_reloc_root(trans, upper, edges, &nr); - if (!root) - continue; - - if (upper->eb && !upper->locked) + root = select_reloc_root(trans, rc, upper, edges, &nr); + BUG_ON(!root); + + if (upper->eb && !upper->locked) { + if (!lowest) { + ret = btrfs_bin_search(upper->eb, key, + upper->level, &slot); + BUG_ON(ret); + bytenr = btrfs_node_blockptr(upper->eb, slot); + if (node->eb->start == bytenr) + goto next; + } drop_node_buffer(upper); + } if (!upper->eb) { ret = btrfs_search_slot(trans, root, key, path, 0, 1); @@ -2040,11 +2480,17 @@ static int do_relocation(struct btrfs_trans_handle *trans, } BUG_ON(ret > 0); - slot = path->slots[upper->level]; + if (!upper->eb) { + upper->eb = path->nodes[upper->level]; + path->nodes[upper->level] = NULL; + } else { + BUG_ON(upper->eb != path->nodes[upper->level]); + } - btrfs_unlock_up_safe(path, upper->level + 1); - grab_path_buffers(path, upper, edges, nr); + upper->locked = 1; + path->locks[upper->level] = 0; + slot = path->slots[upper->level]; btrfs_release_path(NULL, path); } else { ret = btrfs_bin_search(upper->eb, key, upper->level, @@ -2053,14 +2499,11 @@ static int do_relocation(struct btrfs_trans_handle *trans, } bytenr = btrfs_node_blockptr(upper->eb, slot); - if (!lowest) { - if (node->eb->start == bytenr) { - btrfs_tree_unlock(upper->eb); - upper->locked = 0; - continue; - } + if (lowest) { + BUG_ON(bytenr != node->bytenr); } else { - BUG_ON(node->bytenr != bytenr); + if (node->eb->start == bytenr) + goto next; } blocksize = btrfs_level_size(root, node->level); @@ -2072,13 +2515,13 @@ static int do_relocation(struct btrfs_trans_handle *trans, if (!node->eb) { ret = btrfs_cow_block(trans, root, eb, upper->eb, slot, &eb); + btrfs_tree_unlock(eb); + free_extent_buffer(eb); if (ret < 0) { err = ret; - break; + goto next; } - btrfs_set_lock_blocking(eb); - node->eb = eb; - node->locked = 1; + BUG_ON(node->eb != eb); } else { btrfs_set_node_blockptr(upper->eb, slot, node->eb->start); @@ -2096,67 +2539,80 @@ static int do_relocation(struct btrfs_trans_handle *trans, ret = btrfs_drop_subtree(trans, root, eb, upper->eb); BUG_ON(ret); } - if (!lowest) { - btrfs_tree_unlock(upper->eb); - upper->locked = 0; - } +next: + if (!upper->pending) + drop_node_buffer(upper); + else + unlock_node_buffer(upper); + if (err) + break; + } + + if (!err && node->pending) { + drop_node_buffer(node); + list_move_tail(&node->list, &rc->backref_cache.changed); + node->pending = 0; } + path->lowest_level = 0; + BUG_ON(err == -ENOSPC); return err; } static int link_to_upper(struct btrfs_trans_handle *trans, + struct reloc_control *rc, struct backref_node *node, struct btrfs_path *path) { struct btrfs_key key; - if (!node->eb || list_empty(&node->upper)) - return 0; btrfs_node_key_to_cpu(node->eb, &key, 0); - return do_relocation(trans, node, &key, path, 0); + return do_relocation(trans, rc, node, &key, path, 0); } static int finish_pending_nodes(struct btrfs_trans_handle *trans, - struct backref_cache *cache, - struct btrfs_path *path) 
+ struct reloc_control *rc, + struct btrfs_path *path, int err) { + LIST_HEAD(list); + struct backref_cache *cache = &rc->backref_cache; struct backref_node *node; int level; int ret; - int err = 0; for (level = 0; level < BTRFS_MAX_LEVEL; level++) { while (!list_empty(&cache->pending[level])) { node = list_entry(cache->pending[level].next, - struct backref_node, lower); - BUG_ON(node->level != level); + struct backref_node, list); + list_move_tail(&node->list, &list); + BUG_ON(!node->pending); - ret = link_to_upper(trans, node, path); - if (ret < 0) - err = ret; - /* - * this remove the node from the pending list and - * may add some other nodes to the level + 1 - * pending list - */ - remove_backref_node(cache, node); + if (!err) { + ret = link_to_upper(trans, rc, node, path); + if (ret < 0) + err = ret; + } } + list_splice_init(&list, &cache->pending[level]); } - BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root)); return err; } static void mark_block_processed(struct reloc_control *rc, - struct backref_node *node) + u64 bytenr, u32 blocksize) +{ + set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, + EXTENT_DIRTY, GFP_NOFS); +} + +static void __mark_block_processed(struct reloc_control *rc, + struct backref_node *node) { u32 blocksize; if (node->level == 0 || in_block_group(node->bytenr, rc->block_group)) { blocksize = btrfs_level_size(rc->extent_root, node->level); - set_extent_bits(&rc->processed_blocks, node->bytenr, - node->bytenr + blocksize - 1, EXTENT_DIRTY, - GFP_NOFS); + mark_block_processed(rc, node->bytenr, blocksize); } node->processed = 1; } @@ -2179,7 +2635,7 @@ static void update_processed_blocks(struct reloc_control *rc, if (next->processed) break; - mark_block_processed(rc, next); + __mark_block_processed(rc, next); if (list_empty(&next->upper)) break; @@ -2202,138 +2658,6 @@ static int tree_block_processed(u64 bytenr, u32 blocksize, return 0; } -/* - * check if there are any file extent pointers in the leaf point to - * data require processing - */ -static int check_file_extents(struct reloc_control *rc, - u64 bytenr, u32 blocksize, u64 ptr_gen) -{ - struct btrfs_key found_key; - struct btrfs_file_extent_item *fi; - struct extent_buffer *leaf; - u32 nritems; - int i; - int ret = 0; - - leaf = read_tree_block(rc->extent_root, bytenr, blocksize, ptr_gen); - - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - cond_resched(); - btrfs_item_key_to_cpu(leaf, &found_key, i); - if (found_key.type != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); - if (bytenr == 0) - continue; - if (in_block_group(bytenr, rc->block_group)) { - ret = 1; - break; - } - } - free_extent_buffer(leaf); - return ret; -} - -/* - * scan child blocks of a given block to find blocks require processing - */ -static int add_child_blocks(struct btrfs_trans_handle *trans, - struct reloc_control *rc, - struct backref_node *node, - struct rb_root *blocks) -{ - struct tree_block *block; - struct rb_node *rb_node; - u64 bytenr; - u64 ptr_gen; - u32 blocksize; - u32 nritems; - int i; - int err = 0; - - nritems = btrfs_header_nritems(node->eb); - blocksize = btrfs_level_size(rc->extent_root, node->level - 1); - for (i = 0; i < nritems; i++) { - cond_resched(); - bytenr = btrfs_node_blockptr(node->eb, i); - ptr_gen = btrfs_node_ptr_generation(node->eb, i); - if (ptr_gen == trans->transid) - continue; - 
if (!in_block_group(bytenr, rc->block_group) && - (node->level > 1 || rc->stage == MOVE_DATA_EXTENTS)) - continue; - if (tree_block_processed(bytenr, blocksize, rc)) - continue; - - readahead_tree_block(rc->extent_root, - bytenr, blocksize, ptr_gen); - } - - for (i = 0; i < nritems; i++) { - cond_resched(); - bytenr = btrfs_node_blockptr(node->eb, i); - ptr_gen = btrfs_node_ptr_generation(node->eb, i); - if (ptr_gen == trans->transid) - continue; - if (!in_block_group(bytenr, rc->block_group) && - (node->level > 1 || rc->stage == MOVE_DATA_EXTENTS)) - continue; - if (tree_block_processed(bytenr, blocksize, rc)) - continue; - if (!in_block_group(bytenr, rc->block_group) && - !check_file_extents(rc, bytenr, blocksize, ptr_gen)) - continue; - - block = kmalloc(sizeof(*block), GFP_NOFS); - if (!block) { - err = -ENOMEM; - break; - } - block->bytenr = bytenr; - btrfs_node_key_to_cpu(node->eb, &block->key, i); - block->level = node->level - 1; - block->key_ready = 1; - rb_node = tree_insert(blocks, block->bytenr, &block->rb_node); - BUG_ON(rb_node); - } - if (err) - free_block_list(blocks); - return err; -} - -/* - * find adjacent blocks require processing - */ -static noinline_for_stack -int add_adjacent_blocks(struct btrfs_trans_handle *trans, - struct reloc_control *rc, - struct backref_cache *cache, - struct rb_root *blocks, int level, - struct backref_node **upper) -{ - struct backref_node *node; - int ret = 0; - - WARN_ON(!list_empty(&cache->pending[level])); - - if (list_empty(&cache->pending[level + 1])) - return 1; - - node = list_entry(cache->pending[level + 1].next, - struct backref_node, lower); - if (node->eb) - ret = add_child_blocks(trans, rc, node, blocks); - - *upper = node; - return ret; -} - static int get_tree_block_key(struct reloc_control *rc, struct tree_block *block) { @@ -2371,40 +2695,53 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, struct btrfs_path *path) { struct btrfs_root *root; - int ret; + int release = 0; + int ret = 0; + if (!node) + return 0; + + BUG_ON(node->processed); root = select_one_root(trans, node); - if (unlikely(!root)) { - rc->found_old_snapshot = 1; + if (root == ERR_PTR(-ENOENT)) { update_processed_blocks(rc, node); - return 0; + goto out; } - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) { - ret = do_relocation(trans, node, key, path, 1); - if (ret < 0) - goto out; - if (node->level == 0 && rc->stage == UPDATE_DATA_PTRS) { - ret = replace_file_extents(trans, rc, root, - node->eb, NULL); - if (ret < 0) - goto out; - } - drop_node_buffer(node); - } else if (!root->ref_cows) { - path->lowest_level = node->level; - ret = btrfs_search_slot(trans, root, key, path, 0, 1); - btrfs_release_path(root, path); - if (ret < 0) + if (!root || root->ref_cows) { + ret = reserve_metadata_space(trans, rc, node); + if (ret) goto out; - } else if (root != node->root) { - WARN_ON(node->level > 0 || rc->stage != UPDATE_DATA_PTRS); + release = 1; } - update_processed_blocks(rc, node); - ret = 0; + if (root) { + if (root->ref_cows) { + BUG_ON(node->new_bytenr); + BUG_ON(!list_empty(&node->list)); + btrfs_record_root_in_trans(trans, root); + root = root->reloc_root; + node->new_bytenr = root->node->start; + node->root = root; + list_add_tail(&node->list, &rc->backref_cache.changed); + } else { + path->lowest_level = node->level; + ret = btrfs_search_slot(trans, root, key, path, 0, 1); + btrfs_release_path(root, path); + if (ret > 0) + ret = 0; + } + if (!ret) + update_processed_blocks(rc, node); + } else { + ret = do_relocation(trans, rc, 
node, key, path, 1); + } out: - drop_node_buffer(node); + if (ret || node->level == 0 || node->cowonly) { + if (release) + release_metadata_space(rc, node); + remove_backref_node(&rc->backref_cache, node); + } return ret; } @@ -2415,12 +2752,10 @@ static noinline_for_stack int relocate_tree_blocks(struct btrfs_trans_handle *trans, struct reloc_control *rc, struct rb_root *blocks) { - struct backref_cache *cache; struct backref_node *node; struct btrfs_path *path; struct tree_block *block; struct rb_node *rb_node; - int level = -1; int ret; int err = 0; @@ -2428,21 +2763,9 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - cache = kmalloc(sizeof(*cache), GFP_NOFS); - if (!cache) { - btrfs_free_path(path); - return -ENOMEM; - } - - backref_cache_init(cache); - rb_node = rb_first(blocks); while (rb_node) { block = rb_entry(rb_node, struct tree_block, rb_node); - if (level == -1) - level = block->level; - else - BUG_ON(level != block->level); if (!block->key_ready) reada_tree_block(rc, block); rb_node = rb_next(rb_node); @@ -2452,99 +2775,82 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, while (rb_node) { block = rb_entry(rb_node, struct tree_block, rb_node); if (!block->key_ready) - get_tree_block_key(rc, block); - rb_node = rb_next(rb_node); - } - - rb_node = rb_first(blocks); - while (rb_node) { - block = rb_entry(rb_node, struct tree_block, rb_node); - - node = build_backref_tree(rc, cache, &block->key, - block->level, block->bytenr); - if (IS_ERR(node)) { - err = PTR_ERR(node); - goto out; - } - - ret = relocate_tree_block(trans, rc, node, &block->key, - path); - if (ret < 0) { - err = ret; - goto out; - } - remove_backref_node(cache, node); - rb_node = rb_next(rb_node); - } - - if (level > 0) - goto out; - - free_block_list(blocks); - - /* - * now backrefs of some upper level tree blocks have been cached, - * try relocating blocks referenced by these upper level blocks. 
- */ - while (1) { - struct backref_node *upper = NULL; - if (trans->transaction->in_commit || - trans->transaction->delayed_refs.flushing) - break; - - ret = add_adjacent_blocks(trans, rc, cache, blocks, level, - &upper); - if (ret < 0) - err = ret; - if (ret != 0) - break; + get_tree_block_key(rc, block); + rb_node = rb_next(rb_node); + } - rb_node = rb_first(blocks); - while (rb_node) { - block = rb_entry(rb_node, struct tree_block, rb_node); - if (trans->transaction->in_commit || - trans->transaction->delayed_refs.flushing) - goto out; - BUG_ON(!block->key_ready); - node = build_backref_tree(rc, cache, &block->key, - level, block->bytenr); - if (IS_ERR(node)) { - err = PTR_ERR(node); - goto out; - } + rb_node = rb_first(blocks); + while (rb_node) { + block = rb_entry(rb_node, struct tree_block, rb_node); - ret = relocate_tree_block(trans, rc, node, - &block->key, path); - if (ret < 0) { - err = ret; - goto out; - } - remove_backref_node(cache, node); - rb_node = rb_next(rb_node); + node = build_backref_tree(rc, &block->key, + block->level, block->bytenr); + if (IS_ERR(node)) { + err = PTR_ERR(node); + goto out; } - free_block_list(blocks); - if (upper) { - ret = link_to_upper(trans, upper, path); - if (ret < 0) { + ret = relocate_tree_block(trans, rc, node, &block->key, + path); + if (ret < 0) { + if (ret != -EAGAIN || rb_node == rb_first(blocks)) err = ret; - break; - } - remove_backref_node(cache, upper); + goto out; } + rb_node = rb_next(rb_node); } out: free_block_list(blocks); + err = finish_pending_nodes(trans, rc, path, err); - ret = finish_pending_nodes(trans, cache, path); - if (ret < 0) - err = ret; - - kfree(cache); btrfs_free_path(path); return err; } +static noinline_for_stack +int prealloc_file_extent_cluster(struct inode *inode, + struct file_extent_cluster *cluster) +{ + u64 alloc_hint = 0; + u64 start; + u64 end; + u64 offset = BTRFS_I(inode)->index_cnt; + u64 num_bytes; + int nr = 0; + int ret = 0; + + BUG_ON(cluster->start != cluster->boundary[0]); + mutex_lock(&inode->i_mutex); + + ret = btrfs_check_data_free_space(inode, cluster->end + + 1 - cluster->start); + if (ret) + goto out; + + while (nr < cluster->nr) { + start = cluster->boundary[nr] - offset; + if (nr + 1 < cluster->nr) + end = cluster->boundary[nr + 1] - 1 - offset; + else + end = cluster->end - offset; + + lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + num_bytes = end + 1 - start; + ret = btrfs_prealloc_file_range(inode, 0, start, + num_bytes, num_bytes, + end + 1, &alloc_hint); + unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); + if (ret) + break; + nr++; + } + btrfs_free_reserved_data_space(inode, cluster->end + + 1 - cluster->start); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + static noinline_for_stack int setup_extent_mapping(struct inode *inode, u64 start, u64 end, u64 block_start) @@ -2588,7 +2894,6 @@ static int relocate_file_extent_cluster(struct inode *inode, u64 offset = BTRFS_I(inode)->index_cnt; unsigned long index; unsigned long last_index; - unsigned int dirty_page = 0; struct page *page; struct file_ra_state *ra; int nr = 0; @@ -2601,21 +2906,24 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!ra) return -ENOMEM; - index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; - last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; + ret = prealloc_file_extent_cluster(inode, cluster); + if (ret) + goto out; - mutex_lock(&inode->i_mutex); + file_ra_state_init(ra, inode->i_mapping); - i_size_write(inode, cluster->end + 1 - offset); 
ret = setup_extent_mapping(inode, cluster->start - offset, cluster->end - offset, cluster->start); if (ret) - goto out_unlock; - - file_ra_state_init(ra, inode->i_mapping); + goto out; - WARN_ON(cluster->start != cluster->boundary[0]); + index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; + last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; while (index <= last_index) { + ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); + if (ret) + goto out; + page = find_lock_page(inode->i_mapping, index); if (!page) { page_cache_sync_readahead(inode->i_mapping, @@ -2623,8 +2931,10 @@ static int relocate_file_extent_cluster(struct inode *inode, last_index + 1 - index); page = grab_cache_page(inode->i_mapping, index); if (!page) { + btrfs_delalloc_release_metadata(inode, + PAGE_CACHE_SIZE); ret = -ENOMEM; - goto out_unlock; + goto out; } } @@ -2640,8 +2950,10 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!PageUptodate(page)) { unlock_page(page); page_cache_release(page); + btrfs_delalloc_release_metadata(inode, + PAGE_CACHE_SIZE); ret = -EIO; - goto out_unlock; + goto out; } } @@ -2660,10 +2972,9 @@ static int relocate_file_extent_cluster(struct inode *inode, EXTENT_BOUNDARY, GFP_NOFS); nr++; } - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); + btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); set_page_dirty(page); - dirty_page++; unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); @@ -2671,20 +2982,11 @@ static int relocate_file_extent_cluster(struct inode *inode, page_cache_release(page); index++; - if (nr < cluster->nr && - page_end + 1 + offset == cluster->boundary[nr]) { - balance_dirty_pages_ratelimited_nr(inode->i_mapping, - dirty_page); - dirty_page = 0; - } - } - if (dirty_page) { - balance_dirty_pages_ratelimited_nr(inode->i_mapping, - dirty_page); + balance_dirty_pages_ratelimited(inode->i_mapping); + btrfs_throttle(BTRFS_I(inode)->root); } WARN_ON(nr != cluster->nr); -out_unlock: - mutex_unlock(&inode->i_mutex); +out: kfree(ra); return ret; } @@ -2870,9 +3172,6 @@ out: static int block_use_full_backref(struct reloc_control *rc, struct extent_buffer *eb) { - struct btrfs_path *path; - struct btrfs_extent_item *ei; - struct btrfs_key key; u64 flags; int ret; @@ -2880,28 +3179,14 @@ static int block_use_full_backref(struct reloc_control *rc, btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) return 1; - path = btrfs_alloc_path(); - BUG_ON(!path); - - key.objectid = eb->start; - key.type = BTRFS_EXTENT_ITEM_KEY; - key.offset = eb->len; - - path->search_commit_root = 1; - path->skip_locking = 1; - ret = btrfs_search_slot(NULL, rc->extent_root, - &key, path, 0, 0); + ret = btrfs_lookup_extent_info(NULL, rc->extent_root, + eb->start, eb->len, NULL, &flags); BUG_ON(ret); - ei = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_extent_item); - flags = btrfs_extent_flags(path->nodes[0], ei); - BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) ret = 1; else ret = 0; - btrfs_free_path(path); return ret; } @@ -3074,22 +3359,10 @@ int add_data_references(struct reloc_control *rc, struct btrfs_extent_inline_ref *iref; unsigned long ptr; unsigned long end; - u32 blocksize; + u32 blocksize = btrfs_level_size(rc->extent_root, 0); int ret; int err = 0; - ret = get_new_location(rc->data_inode, NULL, extent_key->objectid, - extent_key->offset); - BUG_ON(ret < 0); - if (ret > 0) { - /* the relocated data is fragmented */ - rc->extents_skipped++; - 
btrfs_release_path(rc->extent_root, path); - return 0; - } - - blocksize = btrfs_level_size(rc->extent_root, 0); - eb = path->nodes[0]; ptr = btrfs_item_ptr_offset(eb, path->slots[0]); end = ptr + btrfs_item_size_nr(eb, path->slots[0]); @@ -3170,7 +3443,8 @@ int add_data_references(struct reloc_control *rc, */ static noinline_for_stack int find_next_extent(struct btrfs_trans_handle *trans, - struct reloc_control *rc, struct btrfs_path *path) + struct reloc_control *rc, struct btrfs_path *path, + struct btrfs_key *extent_key) { struct btrfs_key key; struct extent_buffer *leaf; @@ -3225,6 +3499,7 @@ next: rc->search_start = end + 1; } else { rc->search_start = key.objectid + key.offset; + memcpy(extent_key, &key, sizeof(key)); return 0; } } @@ -3262,12 +3537,49 @@ static int check_extent_flags(u64 flags) return 0; } +static noinline_for_stack +int prepare_to_relocate(struct reloc_control *rc) +{ + struct btrfs_trans_handle *trans; + int ret; + + rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root); + if (!rc->block_rsv) + return -ENOMEM; + + /* + * reserve some space for creating reloc trees. + * btrfs_init_reloc_root will use them when there + * is no reservation in transaction handle. + */ + ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv, + rc->extent_root->nodesize * 256, + &rc->block_rsv_retries); + if (ret) + return ret; + + rc->block_rsv->refill_used = 1; + btrfs_add_durable_block_rsv(rc->extent_root->fs_info, rc->block_rsv); + + memset(&rc->cluster, 0, sizeof(rc->cluster)); + rc->search_start = rc->block_group->key.objectid; + rc->extents_found = 0; + rc->nodes_relocated = 0; + rc->merging_rsv_size = 0; + rc->block_rsv_retries = 0; + + rc->create_reloc_tree = 1; + set_reloc_control(rc); + + trans = btrfs_join_transaction(rc->extent_root, 1); + btrfs_commit_transaction(trans, rc->extent_root); + return 0; +} static noinline_for_stack int relocate_block_group(struct reloc_control *rc) { struct rb_root blocks = RB_ROOT; struct btrfs_key key; - struct file_extent_cluster *cluster; struct btrfs_trans_handle *trans = NULL; struct btrfs_path *path; struct btrfs_extent_item *ei; @@ -3277,33 +3589,25 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) int ret; int err = 0; - cluster = kzalloc(sizeof(*cluster), GFP_NOFS); - if (!cluster) - return -ENOMEM; - path = btrfs_alloc_path(); - if (!path) { - kfree(cluster); + if (!path) return -ENOMEM; - } - - rc->extents_found = 0; - rc->extents_skipped = 0; - rc->search_start = rc->block_group->key.objectid; - clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, - GFP_NOFS); - - rc->create_reloc_root = 1; - set_reloc_control(rc); - - trans = btrfs_start_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); + ret = prepare_to_relocate(rc); + if (ret) { + err = ret; + goto out_free; + } while (1) { - trans = btrfs_start_transaction(rc->extent_root, 1); + trans = btrfs_start_transaction(rc->extent_root, 0); + + if (update_backref_cache(trans, &rc->backref_cache)) { + btrfs_end_transaction(trans, rc->extent_root); + continue; + } - ret = find_next_extent(trans, rc, path); + ret = find_next_extent(trans, rc, path, &key); if (ret < 0) err = ret; if (ret != 0) @@ -3313,9 +3617,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); - btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); - item_size = btrfs_item_size_nr(path->nodes[0], - path->slots[0]); + 
item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); if (item_size >= sizeof(*ei)) { flags = btrfs_extent_flags(path->nodes[0], ei); ret = check_extent_flags(flags); @@ -3356,73 +3658,100 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { ret = add_tree_block(rc, &key, path, &blocks); } else if (rc->stage == UPDATE_DATA_PTRS && - (flags & BTRFS_EXTENT_FLAG_DATA)) { + (flags & BTRFS_EXTENT_FLAG_DATA)) { ret = add_data_references(rc, &key, path, &blocks); } else { btrfs_release_path(rc->extent_root, path); ret = 0; } if (ret < 0) { - err = 0; + err = ret; break; } if (!RB_EMPTY_ROOT(&blocks)) { ret = relocate_tree_blocks(trans, rc, &blocks); if (ret < 0) { + if (ret != -EAGAIN) { + err = ret; + break; + } + rc->extents_found--; + rc->search_start = key.objectid; + } + } + + ret = btrfs_block_rsv_check(trans, rc->extent_root, + rc->block_rsv, 0, 5); + if (ret < 0) { + if (ret != -EAGAIN) { err = ret; + WARN_ON(1); break; } + rc->commit_transaction = 1; } - nr = trans->blocks_used; - btrfs_end_transaction(trans, rc->extent_root); + if (rc->commit_transaction) { + rc->commit_transaction = 0; + ret = btrfs_commit_transaction(trans, rc->extent_root); + BUG_ON(ret); + } else { + nr = trans->blocks_used; + btrfs_end_transaction_throttle(trans, rc->extent_root); + btrfs_btree_balance_dirty(rc->extent_root, nr); + } trans = NULL; - btrfs_btree_balance_dirty(rc->extent_root, nr); if (rc->stage == MOVE_DATA_EXTENTS && (flags & BTRFS_EXTENT_FLAG_DATA)) { rc->found_file_extent = 1; ret = relocate_data_extent(rc->data_inode, - &key, cluster); + &key, &rc->cluster); if (ret < 0) { err = ret; break; } } } - btrfs_free_path(path); + + btrfs_release_path(rc->extent_root, path); + clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, + GFP_NOFS); if (trans) { nr = trans->blocks_used; - btrfs_end_transaction(trans, rc->extent_root); + btrfs_end_transaction_throttle(trans, rc->extent_root); btrfs_btree_balance_dirty(rc->extent_root, nr); } if (!err) { - ret = relocate_file_extent_cluster(rc->data_inode, cluster); + ret = relocate_file_extent_cluster(rc->data_inode, + &rc->cluster); if (ret < 0) err = ret; } - kfree(cluster); + rc->create_reloc_tree = 0; + set_reloc_control(rc); - rc->create_reloc_root = 0; - smp_mb(); + backref_cache_cleanup(&rc->backref_cache); + btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); - if (rc->extents_found > 0) { - trans = btrfs_start_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); - } + err = prepare_to_merge(rc, err); merge_reloc_roots(rc); + rc->merge_reloc_tree = 0; unset_reloc_control(rc); + btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); /* get rid of pinned extents */ - trans = btrfs_start_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root, 1); btrfs_commit_transaction(trans, rc->extent_root); - +out_free: + btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); + btrfs_free_path(path); return err; } @@ -3448,7 +3777,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, btrfs_set_inode_generation(leaf, item, 1); btrfs_set_inode_size(leaf, item, 0); btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); - btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS); + btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | + BTRFS_INODE_PREALLOC); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); out: @@ -3460,8 +3790,9 @@ out: * helper to create inode for 
data relocation. * the inode is in data relocation tree and its link count is 0 */ -static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, - struct btrfs_block_group_cache *group) +static noinline_for_stack +struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, + struct btrfs_block_group_cache *group) { struct inode *inode = NULL; struct btrfs_trans_handle *trans; @@ -3475,8 +3806,9 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, if (IS_ERR(root)) return ERR_CAST(root); - trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); + trans = btrfs_start_transaction(root, 6); + if (IS_ERR(trans)) + return ERR_CAST(trans); err = btrfs_find_free_objectid(trans, root, objectid, &objectid); if (err) @@ -3496,7 +3828,6 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, out: nr = trans->blocks_used; btrfs_end_transaction(trans, root); - btrfs_btree_balance_dirty(root, nr); if (err) { if (inode) @@ -3506,6 +3837,21 @@ out: return inode; } +static struct reloc_control *alloc_reloc_control(void) +{ + struct reloc_control *rc; + + rc = kzalloc(sizeof(*rc), GFP_NOFS); + if (!rc) + return NULL; + + INIT_LIST_HEAD(&rc->reloc_roots); + backref_cache_init(&rc->backref_cache); + mapping_tree_init(&rc->reloc_root_tree); + extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); + return rc; +} + /* * function to relocate all extents in a block group. */ @@ -3514,24 +3860,26 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) struct btrfs_fs_info *fs_info = extent_root->fs_info; struct reloc_control *rc; int ret; + int rw = 0; int err = 0; - rc = kzalloc(sizeof(*rc), GFP_NOFS); + rc = alloc_reloc_control(); if (!rc) return -ENOMEM; - mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); - INIT_LIST_HEAD(&rc->reloc_roots); + rc->extent_root = extent_root; rc->block_group = btrfs_lookup_block_group(fs_info, group_start); BUG_ON(!rc->block_group); - btrfs_init_workers(&rc->workers, "relocate", - fs_info->thread_pool_size, NULL); - - rc->extent_root = extent_root; - btrfs_prepare_block_group_relocation(extent_root, rc->block_group); + if (!rc->block_group->ro) { + ret = btrfs_set_block_group_ro(extent_root, rc->block_group); + if (ret) { + err = ret; + goto out; + } + rw = 1; + } rc->data_inode = create_reloc_inode(fs_info, rc->block_group); if (IS_ERR(rc->data_inode)) { @@ -3548,9 +3896,6 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) btrfs_wait_ordered_extents(fs_info->tree_root, 0, 0); while (1) { - rc->extents_found = 0; - rc->extents_skipped = 0; - mutex_lock(&fs_info->cleaner_mutex); btrfs_clean_old_snapshots(fs_info->tree_root); @@ -3559,7 +3904,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) { err = ret; - break; + goto out; } if (rc->extents_found == 0) @@ -3573,18 +3918,6 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) invalidate_mapping_pages(rc->data_inode->i_mapping, 0, -1); rc->stage = UPDATE_DATA_PTRS; - } else if (rc->stage == UPDATE_DATA_PTRS && - rc->extents_skipped >= rc->extents_found) { - iput(rc->data_inode); - rc->data_inode = create_reloc_inode(fs_info, - rc->block_group); - if (IS_ERR(rc->data_inode)) { - err = PTR_ERR(rc->data_inode); - rc->data_inode = NULL; - break; - } - rc->stage = MOVE_DATA_EXTENTS; - rc->found_file_extent = 0; } } @@ -3597,8 +3930,9 @@ int 
btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) WARN_ON(rc->block_group->reserved > 0); WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0); out: + if (err && rw) + btrfs_set_block_group_rw(extent_root, rc->block_group); iput(rc->data_inode); - btrfs_stop_workers(&rc->workers); btrfs_put_block_group(rc->block_group); kfree(rc); return err; @@ -3609,7 +3943,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) struct btrfs_trans_handle *trans; int ret; - trans = btrfs_start_transaction(root->fs_info->tree_root, 1); + trans = btrfs_start_transaction(root->fs_info->tree_root, 0); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); @@ -3702,20 +4036,20 @@ int btrfs_recover_relocation(struct btrfs_root *root) if (list_empty(&reloc_roots)) goto out; - rc = kzalloc(sizeof(*rc), GFP_NOFS); + rc = alloc_reloc_control(); if (!rc) { err = -ENOMEM; goto out; } - mapping_tree_init(&rc->reloc_root_tree); - INIT_LIST_HEAD(&rc->reloc_roots); - btrfs_init_workers(&rc->workers, "relocate", - root->fs_info->thread_pool_size, NULL); rc->extent_root = root->fs_info->extent_root; set_reloc_control(rc); + trans = btrfs_join_transaction(rc->extent_root, 1); + + rc->merge_reloc_tree = 1; + while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); @@ -3735,20 +4069,16 @@ int btrfs_recover_relocation(struct btrfs_root *root) fs_root->reloc_root = reloc_root; } - trans = btrfs_start_transaction(rc->extent_root, 1); btrfs_commit_transaction(trans, rc->extent_root); merge_reloc_roots(rc); unset_reloc_control(rc); - trans = btrfs_start_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root, 1); btrfs_commit_transaction(trans, rc->extent_root); out: - if (rc) { - btrfs_stop_workers(&rc->workers); - kfree(rc); - } + kfree(rc); while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); @@ -3814,3 +4144,130 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) btrfs_put_ordered_extent(ordered); return 0; } + +void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct extent_buffer *buf, + struct extent_buffer *cow) +{ + struct reloc_control *rc; + struct backref_node *node; + int first_cow = 0; + int level; + int ret; + + rc = root->fs_info->reloc_ctl; + if (!rc) + return; + + BUG_ON(rc->stage == UPDATE_DATA_PTRS && + root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID); + + level = btrfs_header_level(buf); + if (btrfs_header_generation(buf) <= + btrfs_root_last_snapshot(&root->root_item)) + first_cow = 1; + + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID && + rc->create_reloc_tree) { + WARN_ON(!first_cow && level == 0); + + node = rc->backref_cache.path[level]; + BUG_ON(node->bytenr != buf->start && + node->new_bytenr != buf->start); + + drop_node_buffer(node); + extent_buffer_get(cow); + node->eb = cow; + node->new_bytenr = cow->start; + + if (!node->pending) { + list_move_tail(&node->list, + &rc->backref_cache.pending[level]); + node->pending = 1; + } + + if (first_cow) + __mark_block_processed(rc, node); + + if (first_cow && level > 0) + rc->nodes_relocated += buf->len; + } + + if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) { + ret = replace_file_extents(trans, rc, root, cow); + BUG_ON(ret); + } +} + +/* + * called before creating snapshot. 
it calculates metadata reservation + * requried for relocating tree blocks in the snapshot + */ +void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending, + u64 *bytes_to_reserve) +{ + struct btrfs_root *root; + struct reloc_control *rc; + + root = pending->root; + if (!root->reloc_root) + return; + + rc = root->fs_info->reloc_ctl; + if (!rc->merge_reloc_tree) + return; + + root = root->reloc_root; + BUG_ON(btrfs_root_refs(&root->root_item) == 0); + /* + * relocation is in the stage of merging trees. the space + * used by merging a reloc tree is twice the size of + * relocated tree nodes in the worst case. half for cowing + * the reloc tree, half for cowing the fs tree. the space + * used by cowing the reloc tree will be freed after the + * tree is dropped. if we create snapshot, cowing the fs + * tree may use more space than it frees. so we need + * reserve extra space. + */ + *bytes_to_reserve += rc->nodes_relocated; +} + +/* + * called after snapshot is created. migrate block reservation + * and create reloc root for the newly created snapshot + */ +void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_pending_snapshot *pending) +{ + struct btrfs_root *root = pending->root; + struct btrfs_root *reloc_root; + struct btrfs_root *new_root; + struct reloc_control *rc; + int ret; + + if (!root->reloc_root) + return; + + rc = root->fs_info->reloc_ctl; + rc->merging_rsv_size += rc->nodes_relocated; + + if (rc->merge_reloc_tree) { + ret = btrfs_block_rsv_migrate(&pending->block_rsv, + rc->block_rsv, + rc->nodes_relocated); + BUG_ON(ret); + } + + new_root = pending->snap; + reloc_root = create_reloc_root(trans, root->reloc_root, + new_root->root_key.objectid); + + __add_reloc_root(reloc_root); + new_root->reloc_root = reloc_root; + + if (rc->create_reloc_tree) { + ret = clone_backref_node(trans, rc, root, reloc_root); + BUG_ON(ret); + } +} diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 67fa2d29d663..b91ccd972644 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -259,6 +259,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_key key; + struct btrfs_key root_key; + struct btrfs_root *root; int err = 0; int ret; @@ -270,6 +272,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = 0; + root_key.type = BTRFS_ROOT_ITEM_KEY; + root_key.offset = (u64)-1; + while (1) { ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); if (ret < 0) { @@ -294,13 +299,25 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) key.type != BTRFS_ORPHAN_ITEM_KEY) break; - ret = btrfs_find_dead_roots(tree_root, key.offset); - if (ret) { + root_key.objectid = key.offset; + key.offset++; + + root = btrfs_read_fs_root_no_name(tree_root->fs_info, + &root_key); + if (!IS_ERR(root)) + continue; + + ret = PTR_ERR(root); + if (ret != -ENOENT) { err = ret; break; } - key.offset++; + ret = btrfs_find_dead_roots(tree_root, root_key.objectid); + if (ret) { + err = ret; + break; + } } btrfs_free_path(path); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2909a03e5230..d34b2dfc9628 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -498,7 +498,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_start_delalloc_inodes(root, 0); btrfs_wait_ordered_extents(root, 0, 0); - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); ret = 
btrfs_commit_transaction(trans, root); return ret; } @@ -694,11 +694,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) if (btrfs_super_log_root(&root->fs_info->super_copy) != 0) return -EINVAL; - /* recover relocation */ - ret = btrfs_recover_relocation(root); + ret = btrfs_cleanup_fs_roots(root->fs_info); WARN_ON(ret); - ret = btrfs_cleanup_fs_roots(root->fs_info); + /* recover relocation */ + ret = btrfs_recover_relocation(root); WARN_ON(ret); sb->s_flags &= ~MS_RDONLY; @@ -714,34 +714,18 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct list_head *head = &root->fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; - u64 data_used = 0; int bits = dentry->d_sb->s_blocksize_bits; __be32 *fsid = (__be32 *)root->fs_info->fsid; rcu_read_lock(); - list_for_each_entry_rcu(found, head, list) { - if (found->flags & (BTRFS_BLOCK_GROUP_DUP| - BTRFS_BLOCK_GROUP_RAID10| - BTRFS_BLOCK_GROUP_RAID1)) { - total_used += found->bytes_used; - if (found->flags & BTRFS_BLOCK_GROUP_DATA) - data_used += found->bytes_used; - else - data_used += found->total_bytes; - } - - total_used += found->bytes_used; - if (found->flags & BTRFS_BLOCK_GROUP_DATA) - data_used += found->bytes_used; - else - data_used += found->total_bytes; - } + list_for_each_entry_rcu(found, head, list) + total_used += found->disk_used; rcu_read_unlock(); buf->f_namelen = BTRFS_NAME_LEN; buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; buf->f_bfree = buf->f_blocks - (total_used >> bits); - buf->f_bavail = buf->f_blocks - (data_used >> bits); + buf->f_bavail = buf->f_bfree; buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2cb116099b90..66e4c66cc63b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -165,54 +165,89 @@ enum btrfs_trans_type { TRANS_USERSPACE, }; +static int may_wait_transaction(struct btrfs_root *root, int type) +{ + if (!root->fs_info->log_root_recovering && + ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || + type == TRANS_USERSPACE)) + return 1; + return 0; +} + static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, - int num_blocks, int type) + u64 num_items, int type) { - struct btrfs_trans_handle *h = - kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); + struct btrfs_trans_handle *h; + struct btrfs_transaction *cur_trans; + int retries = 0; int ret; +again: + h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); + if (!h) + return ERR_PTR(-ENOMEM); mutex_lock(&root->fs_info->trans_mutex); - if (!root->fs_info->log_root_recovering && - ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || - type == TRANS_USERSPACE)) + if (may_wait_transaction(root, type)) wait_current_trans(root); + ret = join_transaction(root); BUG_ON(ret); - h->transid = root->fs_info->running_transaction->transid; - h->transaction = root->fs_info->running_transaction; - h->blocks_reserved = num_blocks; + cur_trans = root->fs_info->running_transaction; + cur_trans->use_count++; + mutex_unlock(&root->fs_info->trans_mutex); + + h->transid = cur_trans->transid; + h->transaction = cur_trans; h->blocks_used = 0; h->block_group = 0; - h->alloc_exclude_nr = 0; - h->alloc_exclude_start = 0; + h->bytes_reserved = 0; h->delayed_ref_updates = 0; + h->block_rsv = NULL; - if (!current->journal_info && type != TRANS_USERSPACE) - current->journal_info = h; + smp_mb(); + if (cur_trans->blocked && may_wait_transaction(root, 
type)) { + btrfs_commit_transaction(h, root); + goto again; + } + + if (num_items > 0) { + ret = btrfs_trans_reserve_metadata(h, root, num_items, + &retries); + if (ret == -EAGAIN) { + btrfs_commit_transaction(h, root); + goto again; + } + if (ret < 0) { + btrfs_end_transaction(h, root); + return ERR_PTR(ret); + } + } - root->fs_info->running_transaction->use_count++; + mutex_lock(&root->fs_info->trans_mutex); record_root_in_trans(h, root); mutex_unlock(&root->fs_info->trans_mutex); + + if (!current->journal_info && type != TRANS_USERSPACE) + current->journal_info = h; return h; } struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, - int num_blocks) + int num_items) { - return start_transaction(root, num_blocks, TRANS_START); + return start_transaction(root, num_items, TRANS_START); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, int num_blocks) { - return start_transaction(root, num_blocks, TRANS_JOIN); + return start_transaction(root, 0, TRANS_JOIN); } struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, int num_blocks) { - return start_transaction(r, num_blocks, TRANS_USERSPACE); + return start_transaction(r, 0, TRANS_USERSPACE); } /* wait for a transaction commit to be fully complete */ @@ -286,10 +321,36 @@ void btrfs_throttle(struct btrfs_root *root) mutex_unlock(&root->fs_info->trans_mutex); } +static int should_end_transaction(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + int ret; + ret = btrfs_block_rsv_check(trans, root, + &root->fs_info->global_block_rsv, 0, 5); + return ret ? 1 : 0; +} + +int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_transaction *cur_trans = trans->transaction; + int updates; + + if (cur_trans->blocked || cur_trans->delayed_refs.flushing) + return 1; + + updates = trans->delayed_ref_updates; + trans->delayed_ref_updates = 0; + if (updates) + btrfs_run_delayed_refs(trans, root, updates); + + return should_end_transaction(trans, root); +} + static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root, int throttle) { - struct btrfs_transaction *cur_trans; + struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *info = root->fs_info; int count = 0; @@ -313,9 +374,21 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, count++; } + btrfs_trans_release_metadata(trans, root); + + if (!root->fs_info->open_ioctl_trans && + should_end_transaction(trans, root)) + trans->transaction->blocked = 1; + + if (cur_trans->blocked && !cur_trans->in_commit) { + if (throttle) + return btrfs_commit_transaction(trans, root); + else + wake_up_process(info->transaction_kthread); + } + mutex_lock(&info->trans_mutex); - cur_trans = info->running_transaction; - WARN_ON(cur_trans != trans->transaction); + WARN_ON(cur_trans != info->running_transaction); WARN_ON(cur_trans->num_writers < 1); cur_trans->num_writers--; @@ -603,6 +676,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_free_log(trans, root); btrfs_update_reloc_root(trans, root); + btrfs_orphan_commit_root(trans, root); if (root->commit_root != root->node) { switch_commit_root(root); @@ -627,30 +701,30 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) { struct btrfs_fs_info *info = root->fs_info; - int ret; struct btrfs_trans_handle *trans; + int ret; unsigned long nr; - smp_mb(); 
- if (root->defrag_running) + if (xchg(&root->defrag_running, 1)) return 0; - trans = btrfs_start_transaction(root, 1); + while (1) { - root->defrag_running = 1; + trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); + ret = btrfs_defrag_leaves(trans, root, cacheonly); + nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(info->tree_root, nr); cond_resched(); - trans = btrfs_start_transaction(root, 1); if (root->fs_info->closing || ret != -EAGAIN) break; } root->defrag_running = 0; - smp_mb(); - btrfs_end_transaction(trans, root); - return 0; + return ret; } #if 0 @@ -758,47 +832,63 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root *root = pending->root; struct btrfs_root *parent_root; struct inode *parent_inode; + struct dentry *dentry; struct extent_buffer *tmp; struct extent_buffer *old; int ret; - u64 objectid; - int namelen; + int retries = 0; + u64 to_reserve = 0; u64 index = 0; - - parent_inode = pending->dentry->d_parent->d_inode; - parent_root = BTRFS_I(parent_inode)->root; + u64 objectid; new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); if (!new_root_item) { - ret = -ENOMEM; + pending->error = -ENOMEM; goto fail; } + ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); - if (ret) + if (ret) { + pending->error = ret; goto fail; + } + + btrfs_reloc_pre_snapshot(trans, pending, &to_reserve); + btrfs_orphan_pre_snapshot(trans, pending, &to_reserve); + + if (to_reserve > 0) { + ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv, + to_reserve, &retries); + if (ret) { + pending->error = ret; + goto fail; + } + } key.objectid = objectid; - /* record when the snapshot was created in key.offset */ - key.offset = trans->transid; - btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); + key.offset = (u64)-1; + key.type = BTRFS_ROOT_ITEM_KEY; - memcpy(&pending->root_key, &key, sizeof(key)); - pending->root_key.offset = (u64)-1; + trans->block_rsv = &pending->block_rsv; + dentry = pending->dentry; + parent_inode = dentry->d_parent->d_inode; + parent_root = BTRFS_I(parent_inode)->root; record_root_in_trans(trans, parent_root); + /* * insert the directory item */ - namelen = strlen(pending->name); ret = btrfs_set_inode_index(parent_inode, &index); BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, - pending->name, namelen, - parent_inode->i_ino, - &pending->root_key, BTRFS_FT_DIR, index); + dentry->d_name.name, dentry->d_name.len, + parent_inode->i_ino, &key, + BTRFS_FT_DIR, index); BUG_ON(ret); - btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2); + btrfs_i_size_write(parent_inode, parent_inode->i_size + + dentry->d_name.len * 2); ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); @@ -815,22 +905,32 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, free_extent_buffer(old); btrfs_set_root_node(new_root_item, tmp); - ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, - new_root_item); - BUG_ON(ret); + /* record when the snapshot was created in key.offset */ + key.offset = trans->transid; + ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); btrfs_tree_unlock(tmp); free_extent_buffer(tmp); + BUG_ON(ret); - ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root, - pending->root_key.objectid, + /* + * insert root back/forward references + */ + ret = btrfs_add_root_ref(trans, tree_root, objectid, parent_root->root_key.objectid, - parent_inode->i_ino, 
index, pending->name, - namelen); + parent_inode->i_ino, index, + dentry->d_name.name, dentry->d_name.len); BUG_ON(ret); + key.offset = (u64)-1; + pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); + BUG_ON(IS_ERR(pending->snap)); + + btrfs_reloc_post_snapshot(trans, pending); + btrfs_orphan_post_snapshot(trans, pending); fail: kfree(new_root_item); - return ret; + btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1); + return 0; } /* @@ -878,6 +978,16 @@ int btrfs_transaction_in_commit(struct btrfs_fs_info *info) return ret; } +int btrfs_transaction_blocked(struct btrfs_fs_info *info) +{ + int ret = 0; + spin_lock(&info->new_trans_lock); + if (info->running_transaction) + ret = info->running_transaction->blocked; + spin_unlock(&info->new_trans_lock); + return ret; +} + int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -899,6 +1009,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, 0); BUG_ON(ret); + btrfs_trans_release_metadata(trans, root); + cur_trans = trans->transaction; /* * set the flushing flag so procs in this transaction have to @@ -951,9 +1063,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, snap_pending = 1; WARN_ON(cur_trans != trans->transaction); - prepare_to_wait(&cur_trans->writer_wait, &wait, - TASK_UNINTERRUPTIBLE); - if (cur_trans->num_writers > 1) timeout = MAX_SCHEDULE_TIMEOUT; else if (should_grow) @@ -976,6 +1085,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, */ btrfs_run_ordered_operations(root, 1); + prepare_to_wait(&cur_trans->writer_wait, &wait, + TASK_UNINTERRUPTIBLE); + smp_mb(); if (cur_trans->num_writers > 1 || should_grow) schedule_timeout(timeout); @@ -1103,9 +1215,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) - btrfs_drop_snapshot(root, 0); + btrfs_drop_snapshot(root, NULL, 0); else - btrfs_drop_snapshot(root, 1); + btrfs_drop_snapshot(root, NULL, 1); } return 0; } diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 93c7ccb33118..e104986d0bfd 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -45,20 +45,23 @@ struct btrfs_transaction { struct btrfs_trans_handle { u64 transid; + u64 block_group; + u64 bytes_reserved; unsigned long blocks_reserved; unsigned long blocks_used; - struct btrfs_transaction *transaction; - u64 block_group; - u64 alloc_exclude_start; - u64 alloc_exclude_nr; unsigned long delayed_ref_updates; + struct btrfs_transaction *transaction; + struct btrfs_block_rsv *block_rsv; }; struct btrfs_pending_snapshot { struct dentry *dentry; struct btrfs_root *root; - char *name; - struct btrfs_key root_key; + struct btrfs_root *snap; + /* block reservation for the operation */ + struct btrfs_block_rsv block_rsv; + /* extra metadata reseration for relocation */ + int error; struct list_head list; }; @@ -85,11 +88,11 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, - int num_blocks); + int num_items); struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, - int num_blocks); + int num_blocks); struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, - int num_blocks); + int num_blocks); int btrfs_write_and_wait_transaction(struct 
btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, @@ -103,6 +106,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, + struct btrfs_root *root); void btrfs_throttle(struct btrfs_root *root); int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root); @@ -112,5 +117,6 @@ int btrfs_write_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); int btrfs_wait_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages, int mark); +int btrfs_transaction_blocked(struct btrfs_fs_info *info); int btrfs_transaction_in_commit(struct btrfs_fs_info *info); #endif diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index b10eacdb1620..f7ac8e013ed7 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -117,13 +117,14 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, path->nodes[1], 0, cache_only, &last_ret, &root->defrag_progress); - WARN_ON(ret && ret != -EAGAIN); + if (ret) { + WARN_ON(ret == -EAGAIN); + goto out; + } if (next_key_ret == 0) { memcpy(&root->defrag_progress, &key, sizeof(key)); ret = -EAGAIN; } - - btrfs_release_path(root, path); out: if (path) btrfs_free_path(path); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index af57dd2b43d4..fb102a9aee9c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -135,6 +135,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { int ret; + int err = 0; mutex_lock(&root->log_mutex); if (root->log_root) { @@ -155,17 +156,19 @@ static int start_log_trans(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->tree_log_mutex); if (!root->fs_info->log_root_tree) { ret = btrfs_init_log_root_tree(trans, root->fs_info); - BUG_ON(ret); + if (ret) + err = ret; } - if (!root->log_root) { + if (err == 0 && !root->log_root) { ret = btrfs_add_log_tree(trans, root); - BUG_ON(ret); + if (ret) + err = ret; } mutex_unlock(&root->fs_info->tree_log_mutex); root->log_batch++; atomic_inc(&root->log_writers); mutex_unlock(&root->log_mutex); - return 0; + return err; } /* @@ -376,7 +379,7 @@ insert: BUG_ON(ret); } } else if (ret) { - BUG(); + return ret; } dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); @@ -1699,9 +1702,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, next = btrfs_find_create_tree_block(root, bytenr, blocksize); - wc->process_func(root, next, wc, ptr_gen); - if (*level == 1) { + wc->process_func(root, next, wc, ptr_gen); + path->slots[*level]++; if (wc->free) { btrfs_read_buffer(next, ptr_gen); @@ -1734,35 +1737,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); - if (path->nodes[*level] == root->node) - parent = path->nodes[*level]; - else - parent = path->nodes[*level + 1]; - - bytenr = path->nodes[*level]->start; - - blocksize = btrfs_level_size(root, *level); - root_owner = btrfs_header_owner(parent); - root_gen = btrfs_header_generation(parent); - - wc->process_func(root, path->nodes[*level], wc, - btrfs_header_generation(path->nodes[*level])); - - if (wc->free) { - next = path->nodes[*level]; - btrfs_tree_lock(next); - clean_tree_block(trans, root, next); - btrfs_set_lock_blocking(next); - 
btrfs_wait_tree_block_writeback(next); - btrfs_tree_unlock(next); - - WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); - ret = btrfs_free_reserved_extent(root, bytenr, blocksize); - BUG_ON(ret); - } - free_extent_buffer(path->nodes[*level]); - path->nodes[*level] = NULL; - *level += 1; + path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); cond_resched(); return 0; @@ -1781,7 +1756,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { slot = path->slots[i]; - if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { + if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { struct extent_buffer *node; node = path->nodes[i]; path->slots[i]++; @@ -2047,7 +2022,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, mutex_unlock(&log_root_tree->log_mutex); ret = update_log_root(trans, log); - BUG_ON(ret); mutex_lock(&log_root_tree->log_mutex); if (atomic_dec_and_test(&log_root_tree->log_writers)) { @@ -2056,6 +2030,15 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, wake_up(&log_root_tree->log_writer_wait); } + if (ret) { + BUG_ON(ret != -ENOSPC); + root->fs_info->last_trans_log_full_commit = trans->transid; + btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); + mutex_unlock(&log_root_tree->log_mutex); + ret = -EAGAIN; + goto out; + } + index2 = log_root_tree->log_transid % 2; if (atomic_read(&log_root_tree->log_commit[index2])) { btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); @@ -2129,15 +2112,10 @@ out: return 0; } -/* - * free all the extents used by the tree log. This should be called - * at commit time of the full transaction - */ -int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) +static void free_log_tree(struct btrfs_trans_handle *trans, + struct btrfs_root *log) { int ret; - struct btrfs_root *log; - struct key; u64 start; u64 end; struct walk_control wc = { @@ -2145,10 +2123,6 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) .process_func = process_one_buffer }; - if (!root->log_root || root->fs_info->log_root_recovering) - return 0; - - log = root->log_root; ret = walk_log_tree(trans, log, &wc); BUG_ON(ret); @@ -2162,14 +2136,30 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); } - if (log->log_transid > 0) { - ret = btrfs_del_root(trans, root->fs_info->log_root_tree, - &log->root_key); - BUG_ON(ret); - } - root->log_root = NULL; free_extent_buffer(log->node); kfree(log); +} + +/* + * free all the extents used by the tree log. 
This should be called + * at commit time of the full transaction + */ +int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) +{ + if (root->log_root) { + free_log_tree(trans, root->log_root); + root->log_root = NULL; + } + return 0; +} + +int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info) +{ + if (fs_info->log_root_tree) { + free_log_tree(trans, fs_info->log_root_tree); + fs_info->log_root_tree = NULL; + } return 0; } @@ -2203,6 +2193,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, struct btrfs_dir_item *di; struct btrfs_path *path; int ret; + int err = 0; int bytes_del = 0; if (BTRFS_I(dir)->logged_trans < trans->transid) @@ -2218,7 +2209,11 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, name, name_len, -1); - if (di && !IS_ERR(di)) { + if (IS_ERR(di)) { + err = PTR_ERR(di); + goto fail; + } + if (di) { ret = btrfs_delete_one_dir_name(trans, log, path, di); bytes_del += name_len; BUG_ON(ret); @@ -2226,7 +2221,11 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_release_path(log, path); di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, index, name, name_len, -1); - if (di && !IS_ERR(di)) { + if (IS_ERR(di)) { + err = PTR_ERR(di); + goto fail; + } + if (di) { ret = btrfs_delete_one_dir_name(trans, log, path, di); bytes_del += name_len; BUG_ON(ret); @@ -2244,6 +2243,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_release_path(log, path); ret = btrfs_search_slot(trans, log, &key, path, 0, 1); + if (ret < 0) { + err = ret; + goto fail; + } if (ret == 0) { struct btrfs_inode_item *item; u64 i_size; @@ -2261,9 +2264,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, ret = 0; btrfs_release_path(log, path); } - +fail: btrfs_free_path(path); mutex_unlock(&BTRFS_I(dir)->log_mutex); + if (ret == -ENOSPC) { + root->fs_info->last_trans_log_full_commit = trans->transid; + ret = 0; + } btrfs_end_log_trans(root); return 0; @@ -2291,6 +2298,10 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, dirid, &index); mutex_unlock(&BTRFS_I(inode)->log_mutex); + if (ret == -ENOSPC) { + root->fs_info->last_trans_log_full_commit = trans->transid; + ret = 0; + } btrfs_end_log_trans(root); return ret; @@ -2318,7 +2329,8 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, else key.type = BTRFS_DIR_LOG_INDEX_KEY; ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); - BUG_ON(ret); + if (ret) + return ret; item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_log_item); @@ -2343,6 +2355,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, struct btrfs_key max_key; struct btrfs_root *log = root->log_root; struct extent_buffer *src; + int err = 0; int ret; int i; int nritems; @@ -2405,6 +2418,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, ret = overwrite_item(trans, log, dst_path, path->nodes[0], path->slots[0], &tmp); + if (ret) { + err = ret; + goto done; + } } } btrfs_release_path(root, path); @@ -2432,7 +2449,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, goto done; ret = overwrite_item(trans, log, dst_path, src, i, &min_key); - BUG_ON(ret); + if (ret) { + err = ret; + goto done; + } } path->slots[0] = nritems; @@ -2454,22 
+2474,30 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, ret = overwrite_item(trans, log, dst_path, path->nodes[0], path->slots[0], &tmp); - - BUG_ON(ret); - last_offset = tmp.offset; + if (ret) + err = ret; + else + last_offset = tmp.offset; goto done; } } done: - *last_offset_ret = last_offset; btrfs_release_path(root, path); btrfs_release_path(log, dst_path); - /* insert the log range keys to indicate where the log is valid */ - ret = insert_dir_log_key(trans, log, path, key_type, inode->i_ino, - first_offset, last_offset); - BUG_ON(ret); - return 0; + if (err == 0) { + *last_offset_ret = last_offset; + /* + * insert the log range keys to indicate where the log + * is valid + */ + ret = insert_dir_log_key(trans, log, path, key_type, + inode->i_ino, first_offset, + last_offset); + if (ret) + err = ret; + } + return err; } /* @@ -2501,7 +2529,8 @@ again: ret = log_dir_items(trans, root, inode, path, dst_path, key_type, min_key, &max_key); - BUG_ON(ret); + if (ret) + return ret; if (max_key == (u64)-1) break; min_key = max_key + 1; @@ -2535,8 +2564,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, while (1) { ret = btrfs_search_slot(trans, log, &key, path, -1, 1); - - if (ret != 1) + BUG_ON(ret == 0); + if (ret < 0) break; if (path->slots[0] == 0) @@ -2554,7 +2583,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, btrfs_release_path(log, path); } btrfs_release_path(log, path); - return 0; + return ret; } static noinline int copy_items(struct btrfs_trans_handle *trans, @@ -2587,7 +2616,10 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, } ret = btrfs_insert_empty_items(trans, log, dst_path, ins_keys, ins_sizes, nr); - BUG_ON(ret); + if (ret) { + kfree(ins_data); + return ret; + } for (i = 0; i < nr; i++, dst_path->slots[0]++) { dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], @@ -2660,16 +2692,17 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do this after the loop above to avoid changing the * log tree while trying to change the log tree. */ + ret = 0; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, list); - ret = btrfs_csum_file_blocks(trans, log, sums); - BUG_ON(ret); + if (!ret) + ret = btrfs_csum_file_blocks(trans, log, sums); list_del(&sums->list); kfree(sums); } - return 0; + return ret; } /* log a single inode in the tree log. 
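Illustrative sketch (not part of this series): the tree-log hunks here replace many BUG_ON(ret) calls with plain error propagation, and several callers fall back to a full transaction commit when the log cannot be written (typically -ENOSPC) by recording last_trans_log_full_commit, as btrfs_del_dir_entries_in_log() and btrfs_log_inode_parent() do further down. A minimal rendering of that pattern, with my_log_operation() as an invented stand-in for any of the helpers touched here:

static int my_log_operation_checked(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        int ret;

        ret = my_log_operation(trans, root);    /* hypothetical helper */
        if (ret == -ENOSPC) {
                /*
                 * The log tree could not be updated; make the next fsync
                 * fall back to a full transaction commit and report
                 * success to the caller.
                 */
                root->fs_info->last_trans_log_full_commit = trans->transid;
                ret = 0;
        }
        return ret;
}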
@@ -2697,6 +2730,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, struct btrfs_root *log = root->log_root; struct extent_buffer *src = NULL; u32 size; + int err = 0; int ret; int nritems; int ins_start_slot = 0; @@ -2739,7 +2773,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, } else { ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); } - BUG_ON(ret); + if (ret) { + err = ret; + goto out_unlock; + } path->keep_locks = 1; while (1) { @@ -2768,7 +2805,10 @@ again: ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); - BUG_ON(ret); + if (ret) { + err = ret; + goto out_unlock; + } ins_nr = 1; ins_start_slot = path->slots[0]; next_slot: @@ -2784,7 +2824,10 @@ next_slot: ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); - BUG_ON(ret); + if (ret) { + err = ret; + goto out_unlock; + } ins_nr = 0; } btrfs_release_path(root, path); @@ -2802,7 +2845,10 @@ next_slot: ret = copy_items(trans, log, dst_path, src, ins_start_slot, ins_nr, inode_only); - BUG_ON(ret); + if (ret) { + err = ret; + goto out_unlock; + } ins_nr = 0; } WARN_ON(ins_nr); @@ -2810,14 +2856,18 @@ next_slot: btrfs_release_path(root, path); btrfs_release_path(log, dst_path); ret = log_directory_changes(trans, root, inode, path, dst_path); - BUG_ON(ret); + if (ret) { + err = ret; + goto out_unlock; + } } BTRFS_I(inode)->logged_trans = trans->transid; +out_unlock: mutex_unlock(&BTRFS_I(inode)->log_mutex); btrfs_free_path(path); btrfs_free_path(dst_path); - return 0; + return err; } /* @@ -2942,10 +2992,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_no_trans; } - start_log_trans(trans, root); + ret = start_log_trans(trans, root); + if (ret) + goto end_trans; ret = btrfs_log_inode(trans, root, inode, inode_only); - BUG_ON(ret); + if (ret) + goto end_trans; /* * for regular files, if its inode is already on disk, we don't @@ -2955,8 +3008,10 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, */ if (S_ISREG(inode->i_mode) && BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) - goto no_parent; + BTRFS_I(inode)->last_unlink_trans <= last_committed) { + ret = 0; + goto end_trans; + } inode_only = LOG_INODE_EXISTS; while (1) { @@ -2970,15 +3025,21 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { ret = btrfs_log_inode(trans, root, inode, inode_only); - BUG_ON(ret); + if (ret) + goto end_trans; } if (IS_ROOT(parent)) break; parent = parent->d_parent; } -no_parent: ret = 0; +end_trans: + if (ret < 0) { + BUG_ON(ret != -ENOSPC); + root->fs_info->last_trans_log_full_commit = trans->transid; + ret = 1; + } btrfs_end_log_trans(root); end_no_trans: return ret; @@ -3020,7 +3081,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) path = btrfs_alloc_path(); BUG_ON(!path); - trans = btrfs_start_transaction(fs_info->tree_root, 1); + trans = btrfs_start_transaction(fs_info->tree_root, 0); wc.trans = trans; wc.pin = 1; diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 0776eacb5083..3dfae84c8cc8 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h @@ -25,6 +25,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info); int btrfs_recover_log_trees(struct 
btrfs_root *tree_root); int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct dentry *dentry); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8db7b14bbae8..d6e3af8be95b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1097,7 +1097,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, if (!path) return -ENOMEM; - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; @@ -1486,7 +1486,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) goto error; } - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); lock_chunks(root); device->barriers = 1; @@ -1751,9 +1751,10 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, /* step one, relocate all the extents inside this chunk */ ret = btrfs_relocate_block_group(extent_root, chunk_offset); - BUG_ON(ret); + if (ret) + return ret; - trans = btrfs_start_transaction(root, 1); + trans = btrfs_start_transaction(root, 0); BUG_ON(!trans); lock_chunks(root); @@ -1925,7 +1926,7 @@ int btrfs_balance(struct btrfs_root *dev_root) break; BUG_ON(ret); - trans = btrfs_start_transaction(dev_root, 1); + trans = btrfs_start_transaction(dev_root, 0); BUG_ON(!trans); ret = btrfs_grow_device(trans, device, old_size); @@ -2094,11 +2095,7 @@ again: } /* Shrinking succeeded, else we would be at "done". */ - trans = btrfs_start_transaction(root, 1); - if (!trans) { - ret = -ENOMEM; - goto done; - } + trans = btrfs_start_transaction(root, 0); lock_chunks(root); device->disk_total_bytes = new_size; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 59acd3eb288a..88ecbb215878 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -154,15 +154,10 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, if (trans) return do_setxattr(trans, inode, name, value, size, flags); - ret = btrfs_reserve_metadata_space(root, 2); - if (ret) - return ret; + trans = btrfs_start_transaction(root, 2); + if (IS_ERR(trans)) + return PTR_ERR(trans); - trans = btrfs_start_transaction(root, 1); - if (!trans) { - ret = -ENOMEM; - goto out; - } btrfs_set_trans_block_group(trans, inode); ret = do_setxattr(trans, inode, name, value, size, flags); @@ -174,7 +169,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, BUG_ON(ret); out: btrfs_end_transaction_throttle(trans, root); - btrfs_unreserve_metadata_space(root, 2); return ret; } diff --git a/fs/buffer.c b/fs/buffer.c index e8aa7081d25c..d54812b198e9 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1949,14 +1949,11 @@ static int __block_commit_write(struct inode *inode, struct page *page, } /* - * block_write_begin takes care of the basic task of block allocation and - * bringing partial write blocks uptodate first. - * - * If *pagep is not NULL, then block_write_begin uses the locked page - * at *pagep rather than allocating its own. In this case, the page will - * not be unlocked or deallocated on failure. + * Filesystems implementing the new truncate sequence should use the + * _newtrunc postfix variant which won't incorrectly call vmtruncate. + * The filesystem needs to handle block truncation upon failure. 
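Illustrative sketch (not part of this series): the fs/buffer.c changes below split the generic write_begin helpers into *_newtrunc variants that no longer call vmtruncate() on failure, so a filesystem converted to the new truncate sequence must trim blocks instantiated beyond i_size itself. A minimal ->write_begin along those lines, where myfs_get_block() and myfs_truncate_blocks() are invented placeholders:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = block_write_begin_newtrunc(file, mapping, pos, len, flags,
                                         pagep, fsdata, myfs_get_block);
        if (unlikely(ret)) {
                /*
                 * The _newtrunc variant does not call vmtruncate() for us;
                 * trim any blocks instantiated beyond i_size through the
                 * filesystem's own truncate path.
                 */
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        myfs_truncate_blocks(mapping->host, isize);
        }
        return ret;
}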
*/ -int block_write_begin(struct file *file, struct address_space *mapping, +int block_write_begin_newtrunc(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata, get_block_t *get_block) @@ -1992,20 +1989,50 @@ int block_write_begin(struct file *file, struct address_space *mapping, unlock_page(page); page_cache_release(page); *pagep = NULL; - - /* - * prepare_write() may have instantiated a few blocks - * outside i_size. Trim these off again. Don't need - * i_size_read because we hold i_mutex. - */ - if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); } } out: return status; } +EXPORT_SYMBOL(block_write_begin_newtrunc); + +/* + * block_write_begin takes care of the basic task of block allocation and + * bringing partial write blocks uptodate first. + * + * If *pagep is not NULL, then block_write_begin uses the locked page + * at *pagep rather than allocating its own. In this case, the page will + * not be unlocked or deallocated on failure. + */ +int block_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, + get_block_t *get_block) +{ + int ret; + + ret = block_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, get_block); + + /* + * prepare_write() may have instantiated a few blocks + * outside i_size. Trim these off again. Don't need + * i_size_read because we hold i_mutex. + * + * Filesystems which pass down their own page also cannot + * call into vmtruncate here because it would lead to lock + * inversion problems (*pagep is locked). This is a further + * example of where the old truncate sequence is inadequate. + */ + if (unlikely(ret) && *pagep == NULL) { + loff_t isize = mapping->host->i_size; + if (pos + len > isize) + vmtruncate(mapping->host, isize); + } + + return ret; +} EXPORT_SYMBOL(block_write_begin); int block_write_end(struct file *file, struct address_space *mapping, @@ -2324,7 +2351,7 @@ out: * For moronic filesystems that do not allow holes in file. * We may have to extend the file. */ -int cont_write_begin(struct file *file, struct address_space *mapping, +int cont_write_begin_newtrunc(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata, get_block_t *get_block, loff_t *bytes) @@ -2345,11 +2372,30 @@ int cont_write_begin(struct file *file, struct address_space *mapping, } *pagep = NULL; - err = block_write_begin(file, mapping, pos, len, + err = block_write_begin_newtrunc(file, mapping, pos, len, flags, pagep, fsdata, get_block); out: return err; } +EXPORT_SYMBOL(cont_write_begin_newtrunc); + +int cont_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, + get_block_t *get_block, loff_t *bytes) +{ + int ret; + + ret = cont_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, get_block, bytes); + if (unlikely(ret)) { + loff_t isize = mapping->host->i_size; + if (pos + len > isize) + vmtruncate(mapping->host, isize); + } + + return ret; +} EXPORT_SYMBOL(cont_write_begin); int block_prepare_write(struct page *page, unsigned from, unsigned to, @@ -2381,7 +2427,7 @@ EXPORT_SYMBOL(block_commit_write); * * We are not allowed to take the i_mutex here so we have to play games to * protect against truncate races as the page could now be beyond EOF. 
Because - * vmtruncate() writes the inode size before removing pages, once we have the + * truncate writes the inode size before removing pages, once we have the * page lock we can determine safely if the page is beyond EOF. If it is not * beyond EOF, then the page is guaranteed safe against truncation until we * unlock the page. @@ -2464,10 +2510,11 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head) } /* - * On entry, the page is fully not uptodate. - * On exit the page is fully uptodate in the areas outside (from,to) + * Filesystems implementing the new truncate sequence should use the + * _newtrunc postfix variant which won't incorrectly call vmtruncate. + * The filesystem needs to handle block truncation upon failure. */ -int nobh_write_begin(struct file *file, struct address_space *mapping, +int nobh_write_begin_newtrunc(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata, get_block_t *get_block) @@ -2500,8 +2547,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping, unlock_page(page); page_cache_release(page); *pagep = NULL; - return block_write_begin(file, mapping, pos, len, flags, pagep, - fsdata, get_block); + return block_write_begin_newtrunc(file, mapping, pos, len, + flags, pagep, fsdata, get_block); } if (PageMappedToDisk(page)) @@ -2605,8 +2652,34 @@ out_release: page_cache_release(page); *pagep = NULL; - if (pos + len > inode->i_size) - vmtruncate(inode, inode->i_size); + return ret; +} +EXPORT_SYMBOL(nobh_write_begin_newtrunc); + +/* + * On entry, the page is fully not uptodate. + * On exit the page is fully uptodate in the areas outside (from,to) + */ +int nobh_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata, + get_block_t *get_block) +{ + int ret; + + ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, get_block); + + /* + * prepare_write() may have instantiated a few blocks + * outside i_size. Trim these off again. Don't need + * i_size_read because we hold i_mutex. + */ + if (unlikely(ret)) { + loff_t isize = mapping->host->i_size; + if (pos + len > isize) + vmtruncate(mapping->host, isize); + } return ret; } diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c index 9f46de2ba7a7..89490beaf537 100644 --- a/fs/ceph/auth.c +++ b/fs/ceph/auth.c @@ -1,7 +1,6 @@ #include "ceph_debug.h" #include -#include #include #include @@ -217,8 +216,8 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, if (ac->protocol != protocol) { ret = ceph_auth_init_protocol(ac, protocol); if (ret) { - pr_err("error %d on auth method %s init\n", - ret, ac->ops->name); + pr_err("error %d on auth protocol %d init\n", + ret, protocol); goto out; } } @@ -247,7 +246,7 @@ int ceph_build_auth(struct ceph_auth_client *ac, if (!ac->protocol) return ceph_auth_build_hello(ac, msg_buf, msg_len); BUG_ON(!ac->ops); - if (!ac->ops->is_authenticated(ac)) + if (ac->ops->should_authenticate(ac)) return ceph_build_auth_request(ac, msg_buf, msg_len); return 0; } diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h index 4429a707c021..d38a2fb4a137 100644 --- a/fs/ceph/auth.h +++ b/fs/ceph/auth.h @@ -23,6 +23,12 @@ struct ceph_auth_client_ops { */ int (*is_authenticated)(struct ceph_auth_client *ac); + /* + * true if we should (re)authenticate, e.g., when our tickets + * are getting old and crusty. 
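Illustrative sketch (not part of this series): the ceph auth hunks below add a should_authenticate() operation next to is_authenticated(), so ceph_build_auth() can ask the protocol whether it wants to (re)authenticate, e.g. before its tickets expire. One way a protocol might wire this up; ceph_myauth_info and its fields are invented for the example:

struct ceph_myauth_info {
        bool starting;          /* still doing the initial handshake */
        bool ticket_stale;      /* credentials close to expiring */
};

static int myauth_is_authenticated(struct ceph_auth_client *ac)
{
        struct ceph_myauth_info *xi = ac->private;

        return !xi->starting;
}

static int myauth_should_authenticate(struct ceph_auth_client *ac)
{
        struct ceph_myauth_info *xi = ac->private;

        /* re-authenticate while starting up or once tickets go stale */
        return xi->starting || xi->ticket_stale;
}

static const struct ceph_auth_client_ops myauth_ops = {
        .name                   = "myauth",
        .is_authenticated       = myauth_is_authenticated,
        .should_authenticate    = myauth_should_authenticate,
        /* remaining ops as in auth_none.c / auth_x.c */
};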
+ */ + int (*should_authenticate)(struct ceph_auth_client *ac); + /* * build requests and process replies during monitor * handshake. if handle_reply returns -EAGAIN, we build diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c index 24407c119291..ad1dc21286c7 100644 --- a/fs/ceph/auth_none.c +++ b/fs/ceph/auth_none.c @@ -31,6 +31,13 @@ static int is_authenticated(struct ceph_auth_client *ac) return !xi->starting; } +static int should_authenticate(struct ceph_auth_client *ac) +{ + struct ceph_auth_none_info *xi = ac->private; + + return xi->starting; +} + /* * the generic auth code decode the global_id, and we carry no actual * authenticate state, so nothing happens here. @@ -98,6 +105,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = { .reset = reset, .destroy = destroy, .is_authenticated = is_authenticated, + .should_authenticate = should_authenticate, .handle_reply = handle_reply, .create_authorizer = ceph_auth_none_create_authorizer, .destroy_authorizer = ceph_auth_none_destroy_authorizer, diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c index 7b206231566d..83d4d2785ffe 100644 --- a/fs/ceph/auth_x.c +++ b/fs/ceph/auth_x.c @@ -27,6 +27,17 @@ static int ceph_x_is_authenticated(struct ceph_auth_client *ac) return (ac->want_keys & xi->have_keys) == ac->want_keys; } +static int ceph_x_should_authenticate(struct ceph_auth_client *ac) +{ + struct ceph_x_info *xi = ac->private; + int need; + + ceph_x_validate_tickets(ac, &need); + dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", + ac->want_keys, need, xi->have_keys); + return need != 0; +} + static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + @@ -620,6 +631,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, + .should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 0dd0b81e64f7..ae3e3a306445 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1776,9 +1776,9 @@ out: spin_unlock(&ci->i_unsafe_lock); } -int ceph_fsync(struct file *file, struct dentry *dentry, int datasync) +int ceph_fsync(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; struct ceph_inode_info *ci = ceph_inode(inode); unsigned flush_tid; int ret; diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 3b9eeed097b3..2fa992eaf7da 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -265,16 +265,17 @@ extern const char *ceph_mds_state_name(int s); * - they also define the lock ordering by the MDS * - a few of these are internal to the mds */ -#define CEPH_LOCK_DN 1 -#define CEPH_LOCK_ISNAP 2 -#define CEPH_LOCK_IVERSION 4 /* mds internal */ -#define CEPH_LOCK_IFILE 8 /* mds internal */ -#define CEPH_LOCK_IAUTH 32 -#define CEPH_LOCK_ILINK 64 -#define CEPH_LOCK_IDFT 128 /* dir frag tree */ -#define CEPH_LOCK_INEST 256 /* mds internal */ -#define CEPH_LOCK_IXATTR 512 -#define CEPH_LOCK_INO 2048 /* immutable inode bits; not a lock */ +#define CEPH_LOCK_DVERSION 1 +#define CEPH_LOCK_DN 2 +#define CEPH_LOCK_ISNAP 16 +#define CEPH_LOCK_IVERSION 32 /* mds internal */ +#define CEPH_LOCK_IFILE 64 +#define CEPH_LOCK_IAUTH 128 +#define CEPH_LOCK_ILINK 256 +#define CEPH_LOCK_IDFT 512 /* dir frag tree */ +#define CEPH_LOCK_INEST 1024 /* 
mds internal */ +#define CEPH_LOCK_IXATTR 2048 +#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ /* client_session ops */ enum { diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4fd30900eff7..f85719310db2 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -587,7 +587,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); if (IS_ERR(req)) - return ERR_PTR(PTR_ERR(req)); + return ERR_CAST(req); req->r_dentry = dget(dentry); req->r_num_caps = 2; /* we only need inode linkage */ @@ -1107,10 +1107,9 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, * an fsync() on a dir will wait for any uncommitted directory * operations to commit. */ -static int ceph_dir_fsync(struct file *file, struct dentry *dentry, - int datasync) +static int ceph_dir_fsync(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_path.dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct list_head *head = &ci->i_unsafe_dirops; struct ceph_mds_request *req; diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 17447644d675..4480cb1c63e7 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -133,7 +133,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb, req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH, USE_ANY_MDS); if (IS_ERR(req)) - return ERR_PTR(PTR_ERR(req)); + return ERR_CAST(req); req->r_ino1 = vino; req->r_ino2.ino = cfh->parent_ino; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 6512b6701b9e..6251a1574b94 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -230,7 +230,7 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, /* do the open */ req = prepare_open_request(dir->i_sb, flags, mode); if (IS_ERR(req)) - return ERR_PTR(PTR_ERR(req)); + return ERR_CAST(req); req->r_dentry = dget(dentry); req->r_num_caps = 2; if (flags & O_CREAT) { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index a81b8b662c7b..226f5a50d362 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -69,7 +69,7 @@ struct inode *ceph_get_snapdir(struct inode *parent) BUG_ON(!S_ISDIR(parent->i_mode)); if (IS_ERR(inode)) - return ERR_PTR(PTR_ERR(inode)); + return inode; inode->i_mode = parent->i_mode; inode->i_uid = parent->i_uid; inode->i_gid = parent->i_gid; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 885aa5710cfd..b49f12822cbc 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1768,12 +1768,12 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, mutex_unlock(&mdsc->mutex); dout("do_request waiting\n"); if (req->r_timeout) { - err = (long)wait_for_completion_interruptible_timeout( + err = (long)wait_for_completion_killable_timeout( &req->r_completion, req->r_timeout); if (err == 0) err = -EIO; } else { - err = wait_for_completion_interruptible(&req->r_completion); + err = wait_for_completion_killable(&req->r_completion); } dout("do_request waited, got %d\n", err); mutex_lock(&mdsc->mutex); @@ -2014,16 +2014,21 @@ static void handle_forward(struct ceph_mds_client *mdsc, mutex_lock(&mdsc->mutex); req = __lookup_request(mdsc, tid); if (!req) { - dout("forward %llu to mds%d - req dne\n", tid, next_mds); + dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); goto out; /* dup reply? 
*/ } - if (fwd_seq <= req->r_num_fwd) { - dout("forward %llu to mds%d - old seq %d <= %d\n", + if (req->r_aborted) { + dout("forward tid %llu aborted, unregistering\n", tid); + __unregister_request(mdsc, req); + } else if (fwd_seq <= req->r_num_fwd) { + dout("forward tid %llu to mds%d - old seq %d <= %d\n", tid, next_mds, req->r_num_fwd, fwd_seq); } else { /* resend. forward race not possible; mds would drop */ - dout("forward %llu to mds%d (we resend)\n", tid, next_mds); + dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); + BUG_ON(req->r_err); + BUG_ON(req->r_got_result); req->r_num_fwd = fwd_seq; req->r_resend_mds = next_mds; put_request_session(req); @@ -2541,7 +2546,7 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, return; lease = msg->front.iov_base; lease->action = action; - lease->mask = cpu_to_le16(CEPH_LOCK_DN); + lease->mask = cpu_to_le16(1); lease->ino = cpu_to_le64(ceph_vino(inode).ino); lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); lease->seq = cpu_to_le32(seq); @@ -2571,7 +2576,7 @@ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, BUG_ON(inode == NULL); BUG_ON(dentry == NULL); - BUG_ON(mask != CEPH_LOCK_DN); + BUG_ON(mask == 0); /* is dentry lease valid? */ spin_lock(&dentry->d_lock); diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 60b74839ebec..64b8b1f7863d 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -120,6 +120,12 @@ void ceph_msgr_exit(void) destroy_workqueue(ceph_msgr_wq); } +void ceph_msgr_flush() +{ + flush_workqueue(ceph_msgr_wq); +} + + /* * socket callback functions */ diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 00a9430b1ffc..76fbc957bc13 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -213,6 +213,7 @@ extern int ceph_parse_ips(const char *c, const char *end, extern int ceph_msgr_init(void); extern void ceph_msgr_exit(void); +extern void ceph_msgr_flush(void); extern struct ceph_messenger *ceph_messenger_create( struct ceph_entity_addr *myaddr); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index f6510a476e7e..21c62e9b7d1d 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -704,8 +704,11 @@ static void handle_auth_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { int ret; + int was_auth = 0; mutex_lock(&monc->mutex); + if (monc->auth->ops) + was_auth = monc->auth->ops->is_authenticated(monc->auth); monc->pending_auth = 0; ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, msg->front.iov_len, @@ -716,7 +719,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, wake_up(&monc->client->auth_wq); } else if (ret > 0) { __send_prepared_auth_request(monc, ret); - } else if (monc->auth->ops->is_authenticated(monc->auth)) { + } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { dout("authenticated, starting session\n"); monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index afa7bb3895c4..d25b4add85b4 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -361,8 +361,13 @@ static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); - if (atomic_dec_and_test(&osd->o_ref)) + if (atomic_dec_and_test(&osd->o_ref)) { + struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; + + if (osd->o_authorizer) + ac->ops->destroy_authorizer(ac, osd->o_authorizer); kfree(osd); + } } /* diff --git a/fs/ceph/osdmap.c 
b/fs/ceph/osdmap.c index cfdd8f4388b7..ddc656fb5c05 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -706,7 +706,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, len, *p, end); newcrush = crush_decode(*p, min(*p+len, end)); if (IS_ERR(newcrush)) - return ERR_PTR(PTR_ERR(newcrush)); + return ERR_CAST(newcrush); } /* new flags? */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 7c663d9b9f81..4e0bee240b9d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -669,9 +669,17 @@ static void ceph_destroy_client(struct ceph_client *client) /* unmount */ ceph_mdsc_stop(&client->mdsc); - ceph_monc_stop(&client->monc); ceph_osdc_stop(&client->osdc); + /* + * make sure mds and osd connections close out before destroying + * the auth module, which is needed to free those connections' + * ceph_authorizers. + */ + ceph_msgr_flush(); + + ceph_monc_stop(&client->monc); + ceph_adjust_min_caps(-client->min_caps); ceph_debugfs_client_cleanup(client); @@ -738,7 +746,7 @@ static struct dentry *open_root_dentry(struct ceph_client *client, dout("open_root_inode opening '%s'\n", path); req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); if (IS_ERR(req)) - return ERR_PTR(PTR_ERR(req)); + return ERR_CAST(req); req->r_path1 = kstrdup(path, GFP_NOFS); req->r_ino1.ino = CEPH_INO_ROOT; req->r_ino1.snap = CEPH_NOSNAP; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3725c9ee9d08..10a4a406e887 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -811,7 +810,7 @@ extern void ceph_put_cap(struct ceph_cap *cap); extern void ceph_queue_caps_release(struct inode *inode); extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); -extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync); +extern int ceph_fsync(struct file *file, int datasync); extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session); extern int ceph_get_cap_mds(struct inode *inode); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 0242ff9cbf41..a7eb65c84b1c 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -84,7 +84,7 @@ extern ssize_t cifs_user_read(struct file *file, char __user *read_data, extern ssize_t cifs_user_write(struct file *file, const char __user *write_data, size_t write_size, loff_t *poffset); extern int cifs_lock(struct file *, int, struct file_lock *); -extern int cifs_fsync(struct file *, struct dentry *, int); +extern int cifs_fsync(struct file *, int); extern int cifs_flush(struct file *, fl_owner_t id); extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a83541ec9713..f1ff785b2292 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1676,7 +1676,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, return rc; } -int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) +int cifs_fsync(struct file *file, int datasync) { int xid; int rc = 0; @@ -1688,7 +1688,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) xid = GetXid(); cFYI(1, "Sync file - name: %s datasync: 0x%x", - dentry->d_name.name, datasync); + file->f_path.dentry->d_name.name, datasync); rc = filemap_write_and_wait(inode->i_mapping); if (rc == 0) { diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h index d99860a33890..6b443ff43a19 100644 --- a/fs/coda/coda_int.h +++ 
b/fs/coda/coda_int.h @@ -11,8 +11,7 @@ extern int coda_fake_statfs; void coda_destroy_inodecache(void); int coda_init_inodecache(void); -int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, - int datasync); +int coda_fsync(struct file *coda_file, int datasync); void coda_sysctl_init(void); void coda_sysctl_clean(void); diff --git a/fs/coda/file.c b/fs/coda/file.c index 7196077b1688..ad3cd2abeeb4 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -202,10 +202,10 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) return 0; } -int coda_fsync(struct file *coda_file, struct dentry *coda_dentry, int datasync) +int coda_fsync(struct file *coda_file, int datasync) { struct file *host_file; - struct inode *coda_inode = coda_dentry->d_inode; + struct inode *coda_inode = coda_file->f_path.dentry->d_inode; struct coda_file_info *cfi; int err = 0; diff --git a/fs/compat.c b/fs/compat.c index 05448730f840..f0b391c50552 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -568,6 +568,79 @@ out: return ret; } +/* A write operation does a read from user space and vice versa */ +#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ) + +ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer) +{ + compat_ssize_t tot_len; + struct iovec *iov = *ret_pointer = fast_pointer; + ssize_t ret = 0; + int seg; + + /* + * SuS says "The readv() function *may* fail if the iovcnt argument + * was less than or equal to 0, or greater than {IOV_MAX}. Linux has + * traditionally returned zero for zero segments, so... + */ + if (nr_segs == 0) + goto out; + + ret = -EINVAL; + if (nr_segs > UIO_MAXIOV || nr_segs < 0) + goto out; + if (nr_segs > fast_segs) { + ret = -ENOMEM; + iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); + if (iov == NULL) { + *ret_pointer = fast_pointer; + goto out; + } + } + *ret_pointer = iov; + + /* + * Single unix specification: + * We should -EINVAL if an element length is not >= 0 and fitting an + * ssize_t. The total length is fitting an ssize_t + * + * Be careful here because iov_len is a size_t not an ssize_t + */ + tot_len = 0; + ret = -EINVAL; + for (seg = 0; seg < nr_segs; seg++) { + compat_ssize_t tmp = tot_len; + compat_uptr_t buf; + compat_ssize_t len; + + if (__get_user(len, &uvector->iov_len) || + __get_user(buf, &uvector->iov_base)) { + ret = -EFAULT; + goto out; + } + if (len < 0) /* size_t not fitting in compat_ssize_t .. 
*/ + goto out; + tot_len += len; + if (tot_len < tmp) /* maths overflow on the compat_ssize_t */ + goto out; + if (!access_ok(vrfy_dir(type), buf, len)) { + ret = -EFAULT; + goto out; + } + iov->iov_base = compat_ptr(buf); + iov->iov_len = (compat_size_t) len; + uvector++; + iov++; + } + ret = tot_len; + +out: + return ret; +} + static inline long copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64) { @@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb) iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64)); ret = copy_iocb(nr, iocb, iocb64); if (!ret) - ret = sys_io_submit(ctx_id, nr, iocb64); + ret = do_io_submit(ctx_id, nr, iocb64, 1); return ret; } @@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, { compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov=iovstack, *vector; + struct iovec *iov; ssize_t ret; - int seg; io_fn_t fn; iov_fn_t fnv; - /* - * SuS says "The readv() function *may* fail if the iovcnt argument - * was less than or equal to 0, or greater than {IOV_MAX}. Linux has - * traditionally returned zero for zero segments, so... - */ - ret = 0; - if (nr_segs == 0) - goto out; - - /* - * First get the "struct iovec" from user memory and - * verify all the pointers - */ ret = -EINVAL; - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0)) - goto out; if (!file->f_op) goto out; - if (nr_segs > UIO_FASTIOV) { - ret = -ENOMEM; - iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); - if (!iov) - goto out; - } + ret = -EFAULT; if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) goto out; - /* - * Single unix specification: - * We should -EINVAL if an element length is not >= 0 and fitting an - * ssize_t. The total length is fitting an ssize_t - * - * Be careful here because iov_len is a size_t not an ssize_t - */ - tot_len = 0; - vector = iov; - ret = -EINVAL; - for (seg = 0 ; seg < nr_segs; seg++) { - compat_ssize_t tmp = tot_len; - compat_ssize_t len; - compat_uptr_t buf; - - if (__get_user(len, &uvector->iov_len) || - __get_user(buf, &uvector->iov_base)) { - ret = -EFAULT; - goto out; - } - if (len < 0) /* size_t not fitting an compat_ssize_t .. 
*/ - goto out; - tot_len += len; - if (tot_len < tmp) /* maths overflow on the compat_ssize_t */ - goto out; - vector->iov_base = compat_ptr(buf); - vector->iov_len = (compat_size_t) len; - uvector++; - vector++; - } + tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, + UIO_FASTIOV, iovstack, &iov); if (tot_len == 0) { ret = 0; goto out; diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index c8af2d91174b..41645142b88b 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -72,16 +72,11 @@ int configfs_setattr(struct dentry * dentry, struct iattr * iattr) if (!sd) return -EINVAL; - sd_iattr = sd->s_iattr; - - error = inode_change_ok(inode, iattr); - if (error) - return error; - - error = inode_setattr(inode, iattr); + error = simple_setattr(dentry, iattr); if (error) return error; + sd_iattr = sd->s_iattr; if (!sd_iattr) { /* setting attributes for the first time, allocate now */ sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL); diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 4d74fc72c195..0210898458b2 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -277,8 +277,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n" DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n"); DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n"); + /* - * debugfs_create_x{8,16,32} - create a debugfs file that is used to read and write an unsigned {8,16,32}-bit value + * debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value * * These functions are exactly the same as the above functions (but use a hex * output for the decimal challenged). For details look at the above unsigned @@ -357,6 +359,23 @@ struct dentry *debugfs_create_x32(const char *name, mode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_x32); +/** + * debugfs_create_x64 - create a debugfs file that is used to read and write an unsigned 64-bit value + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is %NULL, then the + * file will be created in the root of the debugfs filesystem. + * @value: a pointer to the variable that the file should read to and write + * from. 
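Illustrative usage (not part of this series): the debugfs hunk below adds debugfs_create_x64() alongside the existing x8/x16/x32 helpers. A minimal caller; the directory name, file name and variable are invented:

#include <linux/debugfs.h>

static u64 mydrv_status;                /* exposed read/write in hex */
static struct dentry *mydrv_dir;

static int __init mydrv_debugfs_init(void)
{
        mydrv_dir = debugfs_create_dir("mydrv", NULL);
        if (!mydrv_dir)
                return -ENOMEM;

        /* appears as /sys/kernel/debug/mydrv/status, printed as 0x%016llx */
        debugfs_create_x64("status", 0644, mydrv_dir, &mydrv_status);
        return 0;
}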
+ */ +struct dentry *debugfs_create_x64(const char *name, mode_t mode, + struct dentry *parent, u64 *value) +{ + return debugfs_create_file(name, mode, parent, value, &fops_x64); +} +EXPORT_SYMBOL_GPL(debugfs_create_x64); + static int debugfs_size_t_set(void *data, u64 val) { diff --git a/fs/direct-io.c b/fs/direct-io.c index e82adc2debb7..7600aacf531d 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -82,6 +82,8 @@ struct dio { int reap_counter; /* rate limit reaping */ get_block_t *get_block; /* block mapping function */ dio_iodone_t *end_io; /* IO completion function */ + dio_submit_t *submit_io; /* IO submition function */ + loff_t logical_offset_in_bio; /* current first logical block in bio */ sector_t final_block_in_bio; /* current final block in bio + 1 */ sector_t next_block_for_io; /* next block to be put under IO, in dio_blocks units */ @@ -96,6 +98,7 @@ struct dio { unsigned cur_page_offset; /* Offset into it, in bytes */ unsigned cur_page_len; /* Nr of bytes at cur_page_offset */ sector_t cur_page_block; /* Where it starts */ + loff_t cur_page_fs_offset; /* Offset in file */ /* BIO completion state */ spinlock_t bio_lock; /* protects BIO fields below */ @@ -300,6 +303,26 @@ static void dio_bio_end_io(struct bio *bio, int error) spin_unlock_irqrestore(&dio->bio_lock, flags); } +/** + * dio_end_io - handle the end io action for the given bio + * @bio: The direct io bio thats being completed + * @error: Error if there was one + * + * This is meant to be called by any filesystem that uses their own dio_submit_t + * so that the DIO specific endio actions are dealt with after the filesystem + * has done it's completion work. + */ +void dio_end_io(struct bio *bio, int error) +{ + struct dio *dio = bio->bi_private; + + if (dio->is_async) + dio_bio_end_aio(bio, error); + else + dio_bio_end_io(bio, error); +} +EXPORT_SYMBOL_GPL(dio_end_io); + static int dio_bio_alloc(struct dio *dio, struct block_device *bdev, sector_t first_sector, int nr_vecs) @@ -316,6 +339,7 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev, bio->bi_end_io = dio_bio_end_io; dio->bio = bio; + dio->logical_offset_in_bio = dio->cur_page_fs_offset; return 0; } @@ -340,10 +364,15 @@ static void dio_bio_submit(struct dio *dio) if (dio->is_async && dio->rw == READ) bio_set_pages_dirty(bio); - submit_bio(dio->rw, bio); + if (dio->submit_io) + dio->submit_io(dio->rw, bio, dio->inode, + dio->logical_offset_in_bio); + else + submit_bio(dio->rw, bio); dio->bio = NULL; dio->boundary = 0; + dio->logical_offset_in_bio = 0; } /* @@ -603,10 +632,26 @@ static int dio_send_cur_page(struct dio *dio) int ret = 0; if (dio->bio) { + loff_t cur_offset = dio->block_in_file << dio->blkbits; + loff_t bio_next_offset = dio->logical_offset_in_bio + + dio->bio->bi_size; + /* - * See whether this new request is contiguous with the old + * See whether this new request is contiguous with the old. + * + * Btrfs cannot handl having logically non-contiguous requests + * submitted. For exmple if you have + * + * Logical: [0-4095][HOLE][8192-12287] + * Phyiscal: [0-4095] [4096-8181] + * + * We cannot submit those pages together as one BIO. So if our + * current logical offset in the file does not equal what would + * be the next logical offset in the bio, submit the bio we + * have. 
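Illustrative sketch (not part of this series, and not how any particular filesystem does it): the direct-io hunks here add a dio_submit_t hook so a filesystem can submit its own bios and later complete them through dio_end_io(), while the generic code tracks the logical file offset of each bio. The myfs_* names below are invented:

static void myfs_dio_bio_end_io(struct bio *bio, int error)
{
        /* filesystem-specific completion work would go here ... */

        /* ... then hand the bio back to the generic direct-io code */
        dio_end_io(bio, error);
}

static void myfs_submit_dio_bio(int rw, struct bio *bio, struct inode *inode,
                                loff_t file_offset)
{
        /*
         * file_offset is the logical offset in the file at which this
         * bio starts; record it if completion needs it (checksums etc.).
         */
        myfs_record_dio_range(inode, file_offset, bio->bi_size);

        /* take over completion, then submit to the block layer as usual */
        bio->bi_end_io = myfs_dio_bio_end_io;
        submit_bio(rw, bio);
}

/* myfs_submit_dio_bio would be passed as the submit_io argument to
 * __blockdev_direct_IO(). */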
*/ - if (dio->final_block_in_bio != dio->cur_page_block) + if (dio->final_block_in_bio != dio->cur_page_block || + cur_offset != bio_next_offset) dio_bio_submit(dio); /* * Submit now if the underlying fs is about to perform a @@ -701,6 +746,7 @@ submit_page_section(struct dio *dio, struct page *page, dio->cur_page_offset = offset; dio->cur_page_len = len; dio->cur_page_block = blocknr; + dio->cur_page_fs_offset = dio->block_in_file << dio->blkbits; out: return ret; } @@ -935,7 +981,7 @@ static ssize_t direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs, unsigned blkbits, get_block_t get_block, dio_iodone_t end_io, - struct dio *dio) + dio_submit_t submit_io, struct dio *dio) { unsigned long user_addr; unsigned long flags; @@ -952,6 +998,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, dio->get_block = get_block; dio->end_io = end_io; + dio->submit_io = submit_io; dio->final_block_in_bio = -1; dio->next_block_for_io = -1; @@ -1008,7 +1055,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, } } /* end iovec loop */ - if (ret == -ENOTBLK && (rw & WRITE)) { + if (ret == -ENOTBLK) { /* * The remaining part of the request will be * be handled by buffered I/O when we return @@ -1087,30 +1134,11 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, return ret; } -/* - * This is a library function for use by filesystem drivers. - * - * The locking rules are governed by the flags parameter: - * - if the flags value contains DIO_LOCKING we use a fancy locking - * scheme for dumb filesystems. - * For writes this function is called under i_mutex and returns with - * i_mutex held, for reads, i_mutex is not held on entry, but it is - * taken and dropped again before returning. - * For reads and writes i_alloc_sem is taken in shared mode and released - * on I/O completion (which may happen asynchronously after returning to - * the caller). - * - * - if the flags value does NOT contain DIO_LOCKING we don't use any - * internal locking but rather rely on the filesystem to synchronize - * direct I/O reads/writes versus each other and truncate. - * For reads and writes both i_mutex and i_alloc_sem are not held on - * entry and are never taken. - */ ssize_t -__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, +__blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, - int flags) + dio_submit_t submit_io, int flags) { int seg; size_t size; @@ -1197,11 +1225,49 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, (end > i_size_read(inode))); retval = direct_io_worker(rw, iocb, inode, iov, offset, - nr_segs, blkbits, get_block, end_io, dio); + nr_segs, blkbits, get_block, end_io, + submit_io, dio); + +out: + return retval; +} +EXPORT_SYMBOL(__blockdev_direct_IO_newtrunc); + +/* + * This is a library function for use by filesystem drivers. + * + * The locking rules are governed by the flags parameter: + * - if the flags value contains DIO_LOCKING we use a fancy locking + * scheme for dumb filesystems. + * For writes this function is called under i_mutex and returns with + * i_mutex held, for reads, i_mutex is not held on entry, but it is + * taken and dropped again before returning. 
+ * For reads and writes i_alloc_sem is taken in shared mode and released + * on I/O completion (which may happen asynchronously after returning to + * the caller). + * + * - if the flags value does NOT contain DIO_LOCKING we don't use any + * internal locking but rather rely on the filesystem to synchronize + * direct I/O reads/writes versus each other and truncate. + * For reads and writes both i_mutex and i_alloc_sem are not held on + * entry and are never taken. + */ +ssize_t +__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, + dio_submit_t submit_io, int flags) +{ + ssize_t retval; + retval = __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, + offset, nr_segs, get_block, end_io, submit_io, flags); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again for DIO_LOCKING. + * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this in + * their own manner. This is a further example of where the old + * truncate sequence is inadequate. * * NOTE: filesystems with their own locking have to handle this * on their own. @@ -1209,12 +1275,13 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, if (flags & DIO_LOCKING) { if (unlikely((rw & WRITE) && retval < 0)) { loff_t isize = i_size_read(inode); + loff_t end = offset + iov_length(iov, nr_segs); + if (end > isize) vmtruncate(inode, isize); } } -out: return retval; } EXPORT_SYMBOL(__blockdev_direct_IO); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 3bdddbcc785f..e8fcf4e2ed7d 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -274,7 +274,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file) } static int -ecryptfs_fsync(struct file *file, struct dentry *dentry, int datasync) +ecryptfs_fsync(struct file *file, int datasync) { return vfs_fsync(ecryptfs_file_to_lower(file), datasync); } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 65dee2f336ae..31ef5252f0fe 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -805,7 +805,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, - (ia->ia_size & ~PAGE_CACHE_MASK)); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { - rc = vmtruncate(inode, ia->ia_size); + rc = simple_setsize(inode, ia->ia_size); if (rc) goto out; lower_ia->ia_size = ia->ia_size; @@ -830,7 +830,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, goto out; } } - vmtruncate(inode, ia->ia_size); + simple_setsize(inode, ia->ia_size); rc = ecryptfs_write_inode_size_to_metadata(inode); if (rc) { printk(KERN_ERR "Problem with " diff --git a/fs/exec.c b/fs/exec.c index 9badbc0bfb1d..e19de6a80339 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -768,7 +768,6 @@ static int de_thread(struct task_struct *tsk) struct signal_struct *sig = tsk->signal; struct sighand_struct *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; - int count; if (thread_group_empty(tsk)) goto no_thread_group; @@ -785,13 +784,13 @@ static int de_thread(struct task_struct *tsk) spin_unlock_irq(lock); return -EAGAIN; } + sig->group_exit_task = tsk; - zap_other_threads(tsk); + sig->notify_count = zap_other_threads(tsk); + if (!thread_group_leader(tsk)) + sig->notify_count--; - /* Account for the thread group leader hanging around: */ - count = thread_group_leader(tsk) ? 
1 : 2; - sig->notify_count = count; - while (atomic_read(&sig->count) > count) { + while (sig->notify_count) { __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irq(lock); schedule(); @@ -1662,12 +1661,15 @@ static int coredump_wait(int exit_code, struct core_state *core_state) struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; struct completion *vfork_done; - int core_waiters; + int core_waiters = -EBUSY; init_completion(&core_state->startup); core_state->dumper.task = tsk; core_state->dumper.next = NULL; - core_waiters = zap_threads(tsk, mm, core_state, exit_code); + + down_write(&mm->mmap_sem); + if (!mm->core_state) + core_waiters = zap_threads(tsk, mm, core_state, exit_code); up_write(&mm->mmap_sem); if (unlikely(core_waiters < 0)) @@ -1787,21 +1789,61 @@ static void wait_for_dump_helpers(struct file *file) } +/* + * uhm_pipe_setup + * helper function to customize the process used + * to collect the core in userspace. Specifically + * it sets up a pipe and installs it as fd 0 (stdin) + * for the process. Returns 0 on success, or + * PTR_ERR on failure. + * Note that it also sets the core limit to 1. This + * is a special value that we use to trap recursive + * core dumps + */ +static int umh_pipe_setup(struct subprocess_info *info) +{ + struct file *rp, *wp; + struct fdtable *fdt; + struct coredump_params *cp = (struct coredump_params *)info->data; + struct files_struct *cf = current->files; + + wp = create_write_pipe(0); + if (IS_ERR(wp)) + return PTR_ERR(wp); + + rp = create_read_pipe(wp, 0); + if (IS_ERR(rp)) { + free_write_pipe(wp); + return PTR_ERR(rp); + } + + cp->file = wp; + + sys_close(0); + fd_install(0, rp); + spin_lock(&cf->file_lock); + fdt = files_fdtable(cf); + FD_SET(0, fdt->open_fds); + FD_CLR(0, fdt->close_on_exec); + spin_unlock(&cf->file_lock); + + /* and disallow core files too */ + current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; + + return 0; +} + void do_coredump(long signr, int exit_code, struct pt_regs *regs) { struct core_state core_state; char corename[CORENAME_MAX_SIZE + 1]; struct mm_struct *mm = current->mm; struct linux_binfmt * binfmt; - struct inode * inode; const struct cred *old_cred; struct cred *cred; int retval = 0; int flag = 0; - int ispipe = 0; - char **helper_argv = NULL; - int helper_argc = 0; - int dump_count = 0; + int ispipe; static atomic_t core_dump_count = ATOMIC_INIT(0); struct coredump_params cprm = { .signr = signr, @@ -1820,23 +1862,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; - - cred = prepare_creds(); - if (!cred) { - retval = -ENOMEM; + if (!__get_dumpable(cprm.mm_flags)) goto fail; - } - down_write(&mm->mmap_sem); - /* - * If another thread got here first, or we are not dumpable, bail out. - */ - if (mm->core_state || !__get_dumpable(cprm.mm_flags)) { - up_write(&mm->mmap_sem); - put_cred(cred); + cred = prepare_creds(); + if (!cred) goto fail; - } - /* * We cannot trust fsuid as being the "true" uid of the * process nor do we know its entire history. 
We only know it @@ -1849,10 +1880,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) } retval = coredump_wait(exit_code, &core_state); - if (retval < 0) { - put_cred(cred); - goto fail; - } + if (retval < 0) + goto fail_creds; old_cred = override_creds(cred); @@ -1870,19 +1899,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) ispipe = format_corename(corename, signr); unlock_kernel(); - if ((!ispipe) && (cprm.limit < binfmt->min_coredump)) - goto fail_unlock; - if (ispipe) { - if (cprm.limit == 0) { + int dump_count; + char **helper_argv; + + if (cprm.limit == 1) { /* * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use - * cprm.limit of 0 here as a speacial value. Any - * non-zero limit gets set to RLIM_INFINITY below, but + * cprm.limit of 1 here as a speacial value. Any + * non-1 limit gets set to RLIM_INFINITY below, but * a limit of 0 skips the dump. This is a consistent * way to catch recursive crashes. We can still crash - * if the core_pattern binary sets RLIM_CORE = !0 + * if the core_pattern binary sets RLIM_CORE = !1 * but it runs as root, and can do lots of stupid things * Note that we use task_tgid_vnr here to grab the pid * of the process group leader. That way we get the @@ -1890,11 +1919,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) * core_pattern process dies. */ printk(KERN_WARNING - "Process %d(%s) has RLIMIT_CORE set to 0\n", + "Process %d(%s) has RLIMIT_CORE set to 1\n", task_tgid_vnr(current), current->comm); printk(KERN_WARNING "Aborting core\n"); goto fail_unlock; } + cprm.limit = RLIM_INFINITY; dump_count = atomic_inc_return(&core_dump_count); if (core_pipe_limit && (core_pipe_limit < dump_count)) { @@ -1904,71 +1934,74 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) goto fail_dropcount; } - helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); + helper_argv = argv_split(GFP_KERNEL, corename+1, NULL); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); goto fail_dropcount; } - cprm.limit = RLIM_INFINITY; - - /* SIGPIPE can happen, but it's just never processed */ - if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, - &cprm.file)) { + retval = call_usermodehelper_fns(helper_argv[0], helper_argv, + NULL, UMH_WAIT_EXEC, umh_pipe_setup, + NULL, &cprm); + argv_free(helper_argv); + if (retval) { printk(KERN_INFO "Core dump to %s pipe failed\n", corename); - goto fail_dropcount; + goto close_fail; } - } else + } else { + struct inode *inode; + + if (cprm.limit < binfmt->min_coredump) + goto fail_unlock; + cprm.file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600); - if (IS_ERR(cprm.file)) - goto fail_dropcount; - inode = cprm.file->f_path.dentry->d_inode; - if (inode->i_nlink > 1) - goto close_fail; /* multiple links - don't dump */ - if (!ispipe && d_unhashed(cprm.file->f_path.dentry)) - goto close_fail; - - /* AK: actually i see no reason to not allow this for named pipes etc., - but keep the previous behaviour for now. 
*/ - if (!ispipe && !S_ISREG(inode->i_mode)) - goto close_fail; - /* - * Dont allow local users get cute and trick others to coredump - * into their pre-created files: - * Note, this is not relevant for pipes - */ - if (!ispipe && (inode->i_uid != current_fsuid())) - goto close_fail; - if (!cprm.file->f_op) - goto close_fail; - if (!cprm.file->f_op->write) - goto close_fail; - if (!ispipe && - do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file) != 0) - goto close_fail; + if (IS_ERR(cprm.file)) + goto fail_unlock; - retval = binfmt->core_dump(&cprm); + inode = cprm.file->f_path.dentry->d_inode; + if (inode->i_nlink > 1) + goto close_fail; + if (d_unhashed(cprm.file->f_path.dentry)) + goto close_fail; + /* + * AK: actually i see no reason to not allow this for named + * pipes etc, but keep the previous behaviour for now. + */ + if (!S_ISREG(inode->i_mode)) + goto close_fail; + /* + * Dont allow local users get cute and trick others to coredump + * into their pre-created files. + */ + if (inode->i_uid != current_fsuid()) + goto close_fail; + if (!cprm.file->f_op || !cprm.file->f_op->write) + goto close_fail; + if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) + goto close_fail; + } + retval = binfmt->core_dump(&cprm); if (retval) current->signal->group_exit_code |= 0x80; -close_fail: + if (ispipe && core_pipe_limit) wait_for_dump_helpers(cprm.file); - filp_close(cprm.file, NULL); +close_fail: + if (cprm.file) + filp_close(cprm.file, NULL); fail_dropcount: - if (dump_count) + if (ispipe) atomic_dec(&core_dump_count); fail_unlock: - if (helper_argv) - argv_free(helper_argv); - + coredump_finish(mm); revert_creds(old_cred); +fail_creds: put_cred(cred); - coredump_finish(mm); fail: return; } diff --git a/fs/exofs/file.c b/fs/exofs/file.c index 839b9dc1e70f..fef6899be397 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -40,12 +40,11 @@ static int exofs_release_file(struct inode *inode, struct file *filp) return 0; } -static int exofs_file_fsync(struct file *filp, struct dentry *dentry, - int datasync) +static int exofs_file_fsync(struct file *filp, int datasync) { int ret; struct address_space *mapping = filp->f_mapping; - struct inode *inode = dentry->d_inode; + struct inode *inode = mapping->host; struct super_block *sb; ret = filemap_write_and_wait(mapping); @@ -66,7 +65,7 @@ static int exofs_file_fsync(struct file *filp, struct dentry *dentry, static int exofs_flush(struct file *file, fl_owner_t id) { - exofs_file_fsync(file, file->f_path.dentry, 1); + exofs_file_fsync(file, 1); /* TODO: Flush the OSD target */ return 0; } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 0b038e47ad2f..52b34f1d2738 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -122,7 +122,6 @@ extern int ext2_write_inode (struct inode *, struct writeback_control *); extern void ext2_delete_inode (struct inode *); extern int ext2_sync_inode (struct inode *); extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int); -extern void ext2_truncate (struct inode *); extern int ext2_setattr (struct dentry *, struct iattr *); extern void ext2_set_inode_flags(struct inode *inode); extern void ext2_get_inode_flags(struct ext2_inode_info *); @@ -155,7 +154,7 @@ extern void ext2_write_super (struct super_block *); extern const struct file_operations ext2_dir_operations; /* file.c */ -extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync); +extern int ext2_fsync(struct file *file, int datasync); extern const struct inode_operations ext2_file_inode_operations; extern const 
struct file_operations ext2_file_operations; extern const struct file_operations ext2_xip_file_operations; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 5d198d0697fb..49eec9456c5b 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -40,13 +40,13 @@ static int ext2_release_file (struct inode * inode, struct file * filp) return 0; } -int ext2_fsync(struct file *file, struct dentry *dentry, int datasync) +int ext2_fsync(struct file *file, int datasync) { int ret; - struct super_block *sb = dentry->d_inode->i_sb; + struct super_block *sb = file->f_mapping->host->i_sb; struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; - ret = simple_fsync(file, dentry, datasync); + ret = generic_file_fsync(file, datasync); if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, @@ -95,7 +95,6 @@ const struct file_operations ext2_xip_file_operations = { #endif const struct inode_operations ext2_file_inode_operations = { - .truncate = ext2_truncate, #ifdef CONFIG_EXT2_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 527c46d9bc1f..19214435b752 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -54,6 +54,18 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode) inode->i_blocks - ea_blocks == 0); } +static void ext2_truncate_blocks(struct inode *inode, loff_t offset); + +static void ext2_write_failed(struct address_space *mapping, loff_t to) +{ + struct inode *inode = mapping->host; + + if (to > inode->i_size) { + truncate_pagecache(inode, to, inode->i_size); + ext2_truncate_blocks(inode, inode->i_size); + } +} + /* * Called at the last iput() if i_nlink is zero. */ @@ -71,7 +83,7 @@ void ext2_delete_inode (struct inode * inode) inode->i_size = 0; if (inode->i_blocks) - ext2_truncate (inode); + ext2_truncate_blocks(inode, 0); ext2_free_inode (inode); return; @@ -757,8 +769,8 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - ext2_get_block); + return block_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, ext2_get_block); } static int @@ -766,8 +778,25 @@ ext2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { + int ret; + *pagep = NULL; - return __ext2_write_begin(file, mapping, pos, len, flags, pagep,fsdata); + ret = __ext2_write_begin(file, mapping, pos, len, flags, pagep, fsdata); + if (ret < 0) + ext2_write_failed(mapping, pos + len); + return ret; +} + +static int ext2_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + if (ret < len) + ext2_write_failed(mapping, pos + len); + return ret; } static int @@ -775,13 +804,18 @@ ext2_nobh_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { + int ret; + /* * Dir-in-pagecache still uses ext2_write_begin. Would have to rework * directory handling code to pass around offsets rather than struct * pages in order to make this work easily. 
*/ - return nobh_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - ext2_get_block); + ret = nobh_write_begin_newtrunc(file, mapping, pos, len, flags, pagep, + fsdata, ext2_get_block); + if (ret < 0) + ext2_write_failed(mapping, pos + len); + return ret; } static int ext2_nobh_writepage(struct page *page, @@ -800,10 +834,15 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - - return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, ext2_get_block, NULL); + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; + + ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev, + iov, offset, nr_segs, ext2_get_block, NULL); + if (ret < 0 && (rw & WRITE)) + ext2_write_failed(mapping, offset + iov_length(iov, nr_segs)); + return ret; } static int @@ -818,7 +857,7 @@ const struct address_space_operations ext2_aops = { .writepage = ext2_writepage, .sync_page = block_sync_page, .write_begin = ext2_write_begin, - .write_end = generic_write_end, + .write_end = ext2_write_end, .bmap = ext2_bmap, .direct_IO = ext2_direct_IO, .writepages = ext2_writepages, @@ -1027,7 +1066,7 @@ static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int de ext2_free_data(inode, p, q); } -void ext2_truncate(struct inode *inode) +static void __ext2_truncate_blocks(struct inode *inode, loff_t offset) { __le32 *i_data = EXT2_I(inode)->i_data; struct ext2_inode_info *ei = EXT2_I(inode); @@ -1039,27 +1078,8 @@ void ext2_truncate(struct inode *inode) int n; long iblock; unsigned blocksize; - - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode))) - return; - if (ext2_inode_is_fast_symlink(inode)) - return; - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) - return; - blocksize = inode->i_sb->s_blocksize; - iblock = (inode->i_size + blocksize-1) - >> EXT2_BLOCK_SIZE_BITS(inode->i_sb); - - if (mapping_is_xip(inode->i_mapping)) - xip_truncate_page(inode->i_mapping, inode->i_size); - else if (test_opt(inode->i_sb, NOBH)) - nobh_truncate_page(inode->i_mapping, - inode->i_size, ext2_get_block); - else - block_truncate_page(inode->i_mapping, - inode->i_size, ext2_get_block); + iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb); n = ext2_block_to_path(inode, iblock, offsets, NULL); if (n == 0) @@ -1127,6 +1147,62 @@ do_indirects: ext2_discard_reservation(inode); mutex_unlock(&ei->truncate_mutex); +} + +static void ext2_truncate_blocks(struct inode *inode, loff_t offset) +{ + /* + * XXX: it seems like a bug here that we don't allow + * IS_APPEND inode to have blocks-past-i_size trimmed off. + * review and fix this. + * + * Also would be nice to be able to handle IO errors and such, + * but that's probably too much to ask. 
+ */ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) + return; + if (ext2_inode_is_fast_symlink(inode)) + return; + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + return; + __ext2_truncate_blocks(inode, offset); +} + +int ext2_setsize(struct inode *inode, loff_t newsize) +{ + loff_t oldsize; + int error; + + error = inode_newsize_ok(inode, newsize); + if (error) + return error; + + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) + return -EINVAL; + if (ext2_inode_is_fast_symlink(inode)) + return -EINVAL; + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + return -EPERM; + + if (mapping_is_xip(inode->i_mapping)) + error = xip_truncate_page(inode->i_mapping, newsize); + else if (test_opt(inode->i_sb, NOBH)) + error = nobh_truncate_page(inode->i_mapping, + newsize, ext2_get_block); + else + error = block_truncate_page(inode->i_mapping, + newsize, ext2_get_block); + if (error) + return error; + + oldsize = inode->i_size; + i_size_write(inode, newsize); + truncate_pagecache(inode, oldsize, newsize); + + __ext2_truncate_blocks(inode, newsize); + inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; if (inode_needs_sync(inode)) { sync_mapping_buffers(inode->i_mapping); @@ -1134,6 +1210,8 @@ do_indirects: } else { mark_inode_dirty(inode); } + + return 0; } static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, @@ -1474,8 +1552,15 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr) if (error) return error; } - error = inode_setattr(inode, iattr); - if (!error && (iattr->ia_valid & ATTR_MODE)) + if (iattr->ia_valid & ATTR_SIZE) { + error = ext2_setsize(inode, iattr->ia_size); + if (error) + return error; + } + generic_setattr(inode, iattr); + if (iattr->ia_valid & ATTR_MODE) error = ext2_acl_chmod(inode); + mark_inode_dirty(inode); + return error; } diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 71e9eb1fa696..7ff43f4a59cd 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -119,6 +119,8 @@ static void ext2_put_super (struct super_block * sb) int i; struct ext2_sb_info *sbi = EXT2_SB(sb); + dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + if (sb->s_dirt) ext2_write_super(sb); @@ -1063,6 +1065,12 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sb->s_op = &ext2_sops; sb->s_export_op = &ext2_export_ops; sb->s_xattr = ext2_xattr_handlers; + +#ifdef CONFIG_QUOTA + sb->dq_op = &dquot_operations; + sb->s_qcop = &dquot_quotactl_ops; +#endif + root = ext2_iget(sb, EXT2_ROOT_INO); if (IS_ERR(root)) { ret = PTR_ERR(root); @@ -1241,6 +1249,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) spin_unlock(&sbi->s_lock); return 0; } + /* * OK, we are remounting a valid rw partition rdonly, so set * the rdonly flag and then mark the partition as valid again. 
@@ -1248,6 +1257,13 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) es->s_state = cpu_to_le16(sbi->s_mount_state); es->s_mtime = cpu_to_le32(get_seconds()); spin_unlock(&sbi->s_lock); + + err = dquot_suspend(sb, -1); + if (err < 0) { + spin_lock(&sbi->s_lock); + goto restore_opts; + } + ext2_sync_super(sb, es, 1); } else { __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb, @@ -1269,8 +1285,12 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) if (!ext2_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; spin_unlock(&sbi->s_lock); + ext2_write_super(sb); + + dquot_resume(sb, -1); } + return 0; restore_opts: sbi->s_mount_opt = old_opts.s_mount_opt; diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 373fa90c796a..e2e72c367cf6 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -297,7 +297,7 @@ static void free_rb_tree_fname(struct rb_root *root) kfree (old); } if (!parent) - root->rb_node = NULL; + *root = RB_ROOT; else if (parent->rb_left == n) parent->rb_left = NULL; else if (parent->rb_right == n) diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c index fcf7487734b6..d7e9f74dc3a6 100644 --- a/fs/ext3/fsync.c +++ b/fs/ext3/fsync.c @@ -43,9 +43,9 @@ * inode to disk. */ -int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) +int ext3_sync_file(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; struct ext3_inode_info *ei = EXT3_I(inode); journal_t *journal = EXT3_SB(inode->i_sb)->s_journal; int ret, needs_barrier = 0; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 0fc1293d0e96..6c953bb255e7 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -410,6 +410,8 @@ static void ext3_put_super (struct super_block * sb) struct ext3_super_block *es = sbi->s_es; int i, err; + dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + lock_kernel(); ext3_xattr_put_super(sb); @@ -748,7 +750,7 @@ static int ext3_release_dquot(struct dquot *dquot); static int ext3_mark_dquot_dirty(struct dquot *dquot); static int ext3_write_info(struct super_block *sb, int type); static int ext3_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount); + char *path); static int ext3_quota_on_mount(struct super_block *sb, int type); static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); @@ -767,12 +769,12 @@ static const struct dquot_operations ext3_quota_operations = { static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk + .quota_off = dquot_quota_off, + .quota_sync = dquot_quota_sync, + .get_info = dquot_get_dqinfo, + .set_info = dquot_set_dqinfo, + .get_dqblk = dquot_get_dqblk, + .set_dqblk = dquot_set_dqblk }; #endif @@ -1527,7 +1529,7 @@ static void ext3_orphan_cleanup (struct super_block * sb, /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) - vfs_quota_off(sb, i, 0); + dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ @@ -2551,6 +2553,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) ext3_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext3_mount_options old_opts; + int enable_quota = 0; int err; #ifdef CONFIG_QUOTA int i; @@ -2597,6 +2600,10 @@ static int ext3_remount 
(struct super_block * sb, int * flags, char * data) } if (*flags & MS_RDONLY) { + err = dquot_suspend(sb, -1); + if (err < 0) + goto restore_opts; + /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount @@ -2651,6 +2658,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) goto restore_opts; if (!ext3_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; + enable_quota = 1; } } #ifdef CONFIG_QUOTA @@ -2662,6 +2670,9 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) #endif unlock_super(sb); unlock_kernel(); + + if (enable_quota) + dquot_resume(sb, -1); return 0; restore_opts: sb->s_flags = old_sb_flags; @@ -2851,24 +2862,21 @@ static int ext3_write_info(struct super_block *sb, int type) */ static int ext3_quota_on_mount(struct super_block *sb, int type) { - return vfs_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], - EXT3_SB(sb)->s_jquota_fmt, type); + return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type], + EXT3_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext3_quota_on(struct super_block *sb, int type, int format_id, - char *name, int remount) + char *name) { int err; struct path path; if (!test_opt(sb, QUOTA)) return -EINVAL; - /* When remounting, no checks are needed and in fact, name is NULL */ - if (remount) - return vfs_quota_on(sb, type, format_id, name, remount); err = kern_path(name, LOOKUP_FOLLOW, &path); if (err) @@ -2906,7 +2914,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, } } - err = vfs_quota_on_path(sb, type, format_id, &path); + err = dquot_quota_on_path(sb, type, format_id, &path); path_put(&path); return err; } diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d2f37a5516c7..95b7594c76f9 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -591,14 +591,15 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, ret = ext4_mb_new_blocks(handle, &ar, errp); if (count) *count = ar.len; - /* - * Account for the allocated meta blocks + * Account for the allocated meta blocks. We will never + * fail EDQUOT for metdata, but we do account for it. 
*/ if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) { spin_lock(&EXT4_I(inode)->i_block_reservation_lock); EXT4_I(inode)->i_allocated_meta_blocks += ar.len; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); + dquot_alloc_block_nofail(inode, ar.len); } return ret; } diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 538c48655084..5b6973fbf1bd 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -72,9 +72,9 @@ static int add_system_zone(struct ext4_sb_info *sbi, else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; else { - if (start_blk + count > (entry->start_blk + + if (start_blk + count > (entry->start_blk + entry->count)) - entry->count = (start_blk + count - + entry->count = (start_blk + count - entry->start_blk); new_node = *n; new_entry = rb_entry(new_node, struct ext4_system_zone, diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 86cb6d86a048..ea5e6cb7e2a5 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -83,11 +83,10 @@ int ext4_check_dir_entry(const char *function, struct inode *dir, error_msg = "inode out of bounds"; if (error_msg != NULL) - __ext4_error(dir->i_sb, function, - "bad entry in directory #%lu: %s - block=%llu" + ext4_error_inode(function, dir, + "bad entry in directory: %s - block=%llu" "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", - dir->i_ino, error_msg, - (unsigned long long) bh->b_blocknr, + error_msg, (unsigned long long) bh->b_blocknr, (unsigned) (offset%bh->b_size), offset, le32_to_cpu(de->inode), rlen, de->name_len); @@ -111,7 +110,7 @@ static int ext4_readdir(struct file *filp, if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_COMPAT_DIR_INDEX) && - ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) || + ((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) || ((inode->i_size >> sb->s_blocksize_bits) == 1))) { err = ext4_dx_readdir(filp, dirent, filldir); if (err != ERR_BAD_DX_DIR) { @@ -122,20 +121,20 @@ static int ext4_readdir(struct file *filp, * We don't set the inode dirty flag since it's not * critical that it get flushed back to the disk. 
*/ - EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL; + ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX); } stored = 0; offset = filp->f_pos & (sb->s_blocksize - 1); while (!error && !stored && filp->f_pos < inode->i_size) { - ext4_lblk_t blk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb); - struct buffer_head map_bh; + struct ext4_map_blocks map; struct buffer_head *bh = NULL; - map_bh.b_state = 0; - err = ext4_get_blocks(NULL, inode, blk, 1, &map_bh, 0); + map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb); + map.m_len = 1; + err = ext4_map_blocks(NULL, inode, &map, 0); if (err > 0) { - pgoff_t index = map_bh.b_blocknr >> + pgoff_t index = map.m_pblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); if (!ra_has_index(&filp->f_ra, index)) page_cache_sync_readahead( @@ -143,7 +142,7 @@ static int ext4_readdir(struct file *filp, &filp->f_ra, filp, index, 1); filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; - bh = ext4_bread(NULL, inode, blk, 0, &err); + bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err); } /* @@ -152,9 +151,8 @@ static int ext4_readdir(struct file *filp, */ if (!bh) { if (!dir_has_error) { - ext4_error(sb, "directory #%lu " + EXT4_ERROR_INODE(inode, "directory " "contains a hole at offset %Lu", - inode->i_ino, (unsigned long long) filp->f_pos); dir_has_error = 1; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index bf938cf7c5f0..19a4de57128a 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -29,6 +29,9 @@ #include #include #include +#ifdef __KERNEL__ +#include +#endif /* * The fourth extended filesystem constants/structures @@ -54,10 +57,10 @@ #endif #define EXT4_ERROR_INODE(inode, fmt, a...) \ - ext4_error_inode(__func__, (inode), (fmt), ## a); + ext4_error_inode(__func__, (inode), (fmt), ## a) #define EXT4_ERROR_FILE(file, fmt, a...) \ - ext4_error_file(__func__, (file), (fmt), ## a); + ext4_error_file(__func__, (file), (fmt), ## a) /* data type for block offset of block group */ typedef int ext4_grpblk_t; @@ -72,7 +75,7 @@ typedef __u32 ext4_lblk_t; typedef unsigned int ext4_group_t; /* - * Flags used in mballoc's allocation_context flags field. + * Flags used in mballoc's allocation_context flags field. * * Also used to show what's going on for debugging purposes when the * flag field is exported via the traceport interface @@ -125,6 +128,29 @@ struct ext4_allocation_request { unsigned int flags; }; +/* + * Logical to physical block mapping, used by ext4_map_blocks() + * + * This structure is used to pass requests into ext4_map_blocks() as + * well as to store the information returned by ext4_map_blocks(). It + * takes less room on the stack than a struct buffer_head. 
+ */ +#define EXT4_MAP_NEW (1 << BH_New) +#define EXT4_MAP_MAPPED (1 << BH_Mapped) +#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten) +#define EXT4_MAP_BOUNDARY (1 << BH_Boundary) +#define EXT4_MAP_UNINIT (1 << BH_Uninit) +#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\ + EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\ + EXT4_MAP_UNINIT) + +struct ext4_map_blocks { + ext4_fsblk_t m_pblk; + ext4_lblk_t m_lblk; + unsigned int m_len; + unsigned int m_flags; +}; + /* * For delayed allocation tracking */ @@ -321,6 +347,83 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) return flags & EXT4_OTHER_FLMASK; } +/* + * Inode flags used for atomic set/get + */ +enum { + EXT4_INODE_SECRM = 0, /* Secure deletion */ + EXT4_INODE_UNRM = 1, /* Undelete */ + EXT4_INODE_COMPR = 2, /* Compress file */ + EXT4_INODE_SYNC = 3, /* Synchronous updates */ + EXT4_INODE_IMMUTABLE = 4, /* Immutable file */ + EXT4_INODE_APPEND = 5, /* writes to file may only append */ + EXT4_INODE_NODUMP = 6, /* do not dump file */ + EXT4_INODE_NOATIME = 7, /* do not update atime */ +/* Reserved for compression usage... */ + EXT4_INODE_DIRTY = 8, + EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ + EXT4_INODE_NOCOMPR = 10, /* Don't compress */ + EXT4_INODE_ECOMPR = 11, /* Compression error */ +/* End compression flags --- maybe not all used */ + EXT4_INODE_INDEX = 12, /* hash-indexed directory */ + EXT4_INODE_IMAGIC = 13, /* AFS directory */ + EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */ + EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */ + EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */ + EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/ + EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */ + EXT4_INODE_EXTENTS = 19, /* Inode uses extents */ + EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */ + EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */ + EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */ +}; + +#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG)) +#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \ + printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \ + EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); } + +/* + * Since it's pretty easy to mix up bit numbers and hex values, and we + * can't do a compile-time test for ENUM values, we use a run-time + * test to make sure that EXT4_XXX_FL is consistent with respect to + * EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop + * out so it won't cost any extra space in the compiled kernel image. + * But it's important that these values are the same, since we are + * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL + * must be consistent with the values of FS_XXX_FL defined in + * include/linux/fs.h and the on-disk values found in ext2, ext3, and + * ext4 filesystems, and of course the values defined in e2fsprogs. + * + * It's not paranoia if the Murphy's Law really *is* out to get you. 
:-) + */ +static inline void ext4_check_flag_values(void) +{ + CHECK_FLAG_VALUE(SECRM); + CHECK_FLAG_VALUE(UNRM); + CHECK_FLAG_VALUE(COMPR); + CHECK_FLAG_VALUE(SYNC); + CHECK_FLAG_VALUE(IMMUTABLE); + CHECK_FLAG_VALUE(APPEND); + CHECK_FLAG_VALUE(NODUMP); + CHECK_FLAG_VALUE(NOATIME); + CHECK_FLAG_VALUE(DIRTY); + CHECK_FLAG_VALUE(COMPRBLK); + CHECK_FLAG_VALUE(NOCOMPR); + CHECK_FLAG_VALUE(ECOMPR); + CHECK_FLAG_VALUE(INDEX); + CHECK_FLAG_VALUE(IMAGIC); + CHECK_FLAG_VALUE(JOURNAL_DATA); + CHECK_FLAG_VALUE(NOTAIL); + CHECK_FLAG_VALUE(DIRSYNC); + CHECK_FLAG_VALUE(TOPDIR); + CHECK_FLAG_VALUE(HUGE_FILE); + CHECK_FLAG_VALUE(EXTENTS); + CHECK_FLAG_VALUE(EA_INODE); + CHECK_FLAG_VALUE(EOFBLOCKS); + CHECK_FLAG_VALUE(RESERVED); +} + /* Used to pass group descriptor data when online resize is done */ struct ext4_new_group_input { __u32 group; /* Group number for this data */ @@ -332,6 +435,18 @@ struct ext4_new_group_input { __u16 unused; }; +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +struct compat_ext4_new_group_input { + u32 group; + compat_u64 block_bitmap; + compat_u64 inode_bitmap; + compat_u64 inode_table; + u32 blocks_count; + u16 reserved_blocks; + u16 unused; +}; +#endif + /* The struct ext4_new_group_input in kernel space, with free_blocks_count */ struct ext4_new_group_data { __u32 group; @@ -355,7 +470,7 @@ struct ext4_new_group_data { #define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT (EXT4_GET_BLOCKS_UNINIT_EXT|\ EXT4_GET_BLOCKS_CREATE) /* Caller is from the delayed allocation writeout path, - so set the magic i_delalloc_reserve_flag after taking the + so set the magic i_delalloc_reserve_flag after taking the inode allocation semaphore for */ #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004 /* caller is from the direct IO path, request to creation of an @@ -398,6 +513,7 @@ struct ext4_new_group_data { #define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12) #define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent) +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* * ioctl commands in 32 bit emulation */ @@ -408,11 +524,13 @@ struct ext4_new_group_data { #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) #define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int) #define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int) +#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input) #ifdef CONFIG_JBD2_DEBUG #define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int) #endif #define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION #define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION +#endif /* @@ -616,9 +734,8 @@ struct ext4_ext_cache { */ struct ext4_inode_info { __le32 i_data[15]; /* unconverted */ - __u32 i_flags; - ext4_fsblk_t i_file_acl; __u32 i_dtime; + ext4_fsblk_t i_file_acl; /* * i_block_group is the number of the block group which contains @@ -629,6 +746,7 @@ struct ext4_inode_info { */ ext4_group_t i_block_group; unsigned long i_state_flags; /* Dynamic state flags */ + unsigned long i_flags; ext4_lblk_t i_dir_start_lookup; #ifdef CONFIG_EXT4_FS_XATTR @@ -1062,22 +1180,25 @@ enum { EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */ EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ + EXT4_STATE_NEWENTRY, /* File just added to dir */ }; -static inline int ext4_test_inode_state(struct inode *inode, int bit) -{ - return test_bit(bit, &EXT4_I(inode)->i_state_flags); -} - -static inline void ext4_set_inode_state(struct inode *inode, int bit) -{ - set_bit(bit, &EXT4_I(inode)->i_state_flags); +#define EXT4_INODE_BIT_FNS(name, field) \ +static inline 
int ext4_test_inode_##name(struct inode *inode, int bit) \ +{ \ + return test_bit(bit, &EXT4_I(inode)->i_##field); \ +} \ +static inline void ext4_set_inode_##name(struct inode *inode, int bit) \ +{ \ + set_bit(bit, &EXT4_I(inode)->i_##field); \ +} \ +static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \ +{ \ + clear_bit(bit, &EXT4_I(inode)->i_##field); \ } -static inline void ext4_clear_inode_state(struct inode *inode, int bit) -{ - clear_bit(bit, &EXT4_I(inode)->i_state_flags); -} +EXT4_INODE_BIT_FNS(flag, flags) +EXT4_INODE_BIT_FNS(state, state_flags) #else /* Assume that user mode programs are passing in an ext4fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test @@ -1264,7 +1385,7 @@ struct ext4_dir_entry_2 { #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \ EXT4_FEATURE_COMPAT_DIR_INDEX) && \ - (EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) + ext4_test_inode_flag((dir), EXT4_INODE_INDEX)) #define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX) #define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1) @@ -1398,7 +1519,7 @@ extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, extern void ext4_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ -extern int ext4_sync_file(struct file *, struct dentry *, int); +extern int ext4_sync_file(struct file *, int); /* hash.c */ extern int ext4fs_dirhash(const char *name, int len, struct @@ -1678,6 +1799,7 @@ struct ext4_group_info { ext4_grpblk_t bb_first_free; /* first free block */ ext4_grpblk_t bb_free; /* total free blocks */ ext4_grpblk_t bb_fragments; /* nr of freespace fragments */ + ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */ struct list_head bb_prealloc_list; #ifdef DOUBLE_CHECK void *bb_bitmap; @@ -1772,9 +1894,8 @@ extern int ext4_ext_tree_init(handle_t *handle, struct inode *); extern int ext4_ext_writepage_trans_blocks(struct inode *, int); extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk); -extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, unsigned int max_blocks, - struct buffer_head *bh_result, int flags); +extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags); extern void ext4_ext_truncate(struct inode *); extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); @@ -1782,6 +1903,8 @@ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len); extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, ssize_t len); +extern int ext4_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags); extern int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, unsigned int max_blocks, struct buffer_head *bh, int flags); diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index b79ad5126468..dade0c024797 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -273,7 +273,7 @@ static inline int ext4_should_journal_data(struct inode *inode) return 1; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) return 1; - if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) return 1; return 0; } @@ -284,7 +284,7 @@ static inline int ext4_should_order_data(struct inode *inode) return 0; if (!S_ISREG(inode->i_mode)) return 0; - if 
(EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) return 0; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) return 1; @@ -297,7 +297,7 @@ static inline int ext4_should_writeback_data(struct inode *inode) return 0; if (EXT4_JOURNAL(inode) == NULL) return 1; - if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL) + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) return 0; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) return 1; @@ -321,7 +321,7 @@ static inline int ext4_should_dioread_nolock(struct inode *inode) return 0; if (!S_ISREG(inode->i_mode)) return 0; - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return 0; if (ext4_should_journal_data(inode)) return 0; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 236b834b4ca8..377309c1af65 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -107,11 +107,8 @@ static int ext4_ext_truncate_extend_restart(handle_t *handle, if (err <= 0) return err; err = ext4_truncate_restart_trans(handle, inode, needed); - /* - * We have dropped i_data_sem so someone might have cached again - * an extent we are going to truncate. - */ - ext4_ext_invalidate_cache(inode); + if (err == 0) + err = -EAGAIN; return err; } @@ -185,10 +182,10 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) { /* * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME - * block groups per flexgroup, reserve the first block - * group for directories and special files. Regular + * block groups per flexgroup, reserve the first block + * group for directories and special files. Regular * files will start at the second block group. This - * tends to speed up directory access and improves + * tends to speed up directory access and improves * fsck times. 
*/ block_group &= ~(flex_size-1); @@ -439,10 +436,10 @@ static int __ext4_ext_check(const char *function, struct inode *inode, return 0; corrupted: - __ext4_error(inode->i_sb, function, - "bad header/extent in inode #%lu: %s - magic %x, " + ext4_error_inode(function, inode, + "bad header/extent: %s - magic %x, " "entries %u, max %u(%u), depth %u(%u)", - inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic), + error_msg, le16_to_cpu(eh->eh_magic), le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), max, le16_to_cpu(eh->eh_depth), depth); @@ -1622,9 +1619,7 @@ int ext4_ext_try_to_merge(struct inode *inode, merge_done = 1; WARN_ON(eh->eh_entries == 0); if (!eh->eh_entries) - ext4_error(inode->i_sb, - "inode#%lu, eh->eh_entries = 0!", - inode->i_ino); + EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); } return merge_done; @@ -2039,7 +2034,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, struct ext4_ext_cache *cex; int ret = EXT4_EXT_CACHE_NO; - /* + /* * We borrow i_block_reservation_lock to protect i_cached_extent */ spin_lock(&EXT4_I(inode)->i_block_reservation_lock); @@ -2361,7 +2356,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) int depth = ext_depth(inode); struct ext4_ext_path *path; handle_t *handle; - int i = 0, err = 0; + int i, err; ext_debug("truncate since %u\n", start); @@ -2370,23 +2365,26 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) if (IS_ERR(handle)) return PTR_ERR(handle); +again: ext4_ext_invalidate_cache(inode); /* * We start scanning from right side, freeing all the blocks * after i_size and walking into the tree depth-wise. */ + depth = ext_depth(inode); path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); if (path == NULL) { ext4_journal_stop(handle); return -ENOMEM; } + path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); if (ext4_ext_check(inode, path[0].p_hdr, depth)) { err = -EIO; goto out; } - path[0].p_depth = depth; + i = err = 0; while (i >= 0 && err == 0) { if (i == depth) { @@ -2480,6 +2478,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) out: ext4_ext_drop_refs(path); kfree(path); + if (err == -EAGAIN) + goto again; ext4_journal_stop(handle); return err; @@ -2544,7 +2544,7 @@ static void bi_complete(struct bio *bio, int error) /* FIXME!! we need to try to merge to left or right after zero-out */ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) { - int ret = -EIO; + int ret; struct bio *bio; int blkbits, blocksize; sector_t ee_pblock; @@ -2568,6 +2568,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) len = ee_len; bio = bio_alloc(GFP_NOIO, len); + if (!bio) + return -ENOMEM; + bio->bi_sector = ee_pblock; bio->bi_bdev = inode->i_sb->s_bdev; @@ -2595,22 +2598,20 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) submit_bio(WRITE, bio); wait_for_completion(&event); - if (test_bit(BIO_UPTODATE, &bio->bi_flags)) - ret = 0; - else { - ret = -EIO; - break; + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { + bio_put(bio); + return -EIO; } bio_put(bio); ee_len -= done; ee_pblock += done << (blkbits - 9); } - return ret; + return 0; } #define EXT4_EXT_ZERO_LEN 7 /* - * This function is called by ext4_ext_get_blocks() if someone tries to write + * This function is called by ext4_ext_map_blocks() if someone tries to write * to an uninitialized extent. 
It may result in splitting the uninitialized * extent into multiple extents (upto three - one initialized and two * uninitialized). @@ -2620,39 +2621,55 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) * c> Splits in three extents: Somone is writing in middle of the extent */ static int ext4_ext_convert_to_initialized(handle_t *handle, - struct inode *inode, - struct ext4_ext_path *path, - ext4_lblk_t iblock, - unsigned int max_blocks) + struct inode *inode, + struct ext4_map_blocks *map, + struct ext4_ext_path *path) { struct ext4_extent *ex, newex, orig_ex; struct ext4_extent *ex1 = NULL; struct ext4_extent *ex2 = NULL; struct ext4_extent *ex3 = NULL; struct ext4_extent_header *eh; - ext4_lblk_t ee_block; + ext4_lblk_t ee_block, eof_block; unsigned int allocated, ee_len, depth; ext4_fsblk_t newblock; int err = 0; int ret = 0; + int may_zeroout; + + ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" + "block %llu, max_blocks %u\n", inode->i_ino, + (unsigned long long)map->m_lblk, map->m_len); + + eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> + inode->i_sb->s_blocksize_bits; + if (eof_block < map->m_lblk + map->m_len) + eof_block = map->m_lblk + map->m_len; depth = ext_depth(inode); eh = path[depth].p_hdr; ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); - allocated = ee_len - (iblock - ee_block); - newblock = iblock - ee_block + ext_pblock(ex); + allocated = ee_len - (map->m_lblk - ee_block); + newblock = map->m_lblk - ee_block + ext_pblock(ex); + ex2 = ex; orig_ex.ee_block = ex->ee_block; orig_ex.ee_len = cpu_to_le16(ee_len); ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); + /* + * It is safe to convert extent to initialized via explicit + * zeroout only if extent is fully insde i_size or new_size. + */ + may_zeroout = ee_block + ee_len <= eof_block; + err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ - if (ee_len <= 2*EXT4_EXT_ZERO_LEN) { + if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -2665,10 +2682,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, return allocated; } - /* ex1: ee_block to iblock - 1 : uninitialized */ - if (iblock > ee_block) { + /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ + if (map->m_lblk > ee_block) { ex1 = ex; - ex1->ee_len = cpu_to_le16(iblock - ee_block); + ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } @@ -2677,15 +2694,15 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, * we insert ex3, if ex1 is NULL. This is to avoid temporary * overlap of blocks. */ - if (!ex1 && allocated > max_blocks) - ex2->ee_len = cpu_to_le16(max_blocks); + if (!ex1 && allocated > map->m_len) + ex2->ee_len = cpu_to_le16(map->m_len); /* ex3: to ee_block + ee_len : uninitialised */ - if (allocated > max_blocks) { + if (allocated > map->m_len) { unsigned int newdepth; /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ - if (allocated <= EXT4_EXT_ZERO_LEN) { + if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { /* - * iblock == ee_block is handled by the zerouout + * map->m_lblk == ee_block is handled by the zerouout * at the beginning. * Mark first half uninitialized. 
* Mark second half initialized and zero out the @@ -2698,7 +2715,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ext4_ext_dirty(handle, inode, path + depth); ex3 = &newex; - ex3->ee_block = cpu_to_le32(iblock); + ex3->ee_block = cpu_to_le32(map->m_lblk); ext4_ext_store_pblock(ex3, newblock); ex3->ee_len = cpu_to_le16(allocated); err = ext4_ext_insert_extent(handle, inode, path, @@ -2711,7 +2728,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ex->ee_len = orig_ex.ee_len; ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); - /* blocks available from iblock */ + /* blocks available from map->m_lblk */ return allocated; } else if (err) @@ -2733,8 +2750,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, */ depth = ext_depth(inode); ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, - iblock, path); + path = ext4_ext_find_extent(inode, map->m_lblk, + path); if (IS_ERR(path)) { err = PTR_ERR(path); return err; @@ -2754,12 +2771,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, return allocated; } ex3 = &newex; - ex3->ee_block = cpu_to_le32(iblock + max_blocks); - ext4_ext_store_pblock(ex3, newblock + max_blocks); - ex3->ee_len = cpu_to_le16(allocated - max_blocks); + ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); + ext4_ext_store_pblock(ex3, newblock + map->m_len); + ex3->ee_len = cpu_to_le16(allocated - map->m_len); ext4_ext_mark_uninitialized(ex3); err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); - if (err == -ENOSPC) { + if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -2769,7 +2786,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); /* zeroed the full extent */ - /* blocks available from iblock */ + /* blocks available from map->m_lblk */ return allocated; } else if (err) @@ -2783,11 +2800,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, * update the extent length after successful insert of the * split extent */ - orig_ex.ee_len = cpu_to_le16(ee_len - - ext4_ext_get_actual_len(ex3)); + ee_len -= ext4_ext_get_actual_len(ex3); + orig_ex.ee_len = cpu_to_le16(ee_len); + may_zeroout = ee_block + ee_len <= eof_block; + depth = newdepth; ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, iblock, path); + path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; @@ -2801,14 +2820,14 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, if (err) goto out; - allocated = max_blocks; + allocated = map->m_len; /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying * to insert a extent in the middle zerout directly * otherwise give the extent a chance to merge to left */ if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && - iblock != ee_block) { + map->m_lblk != ee_block && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -2818,7 +2837,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); /* zero out the first half */ - /* blocks available from iblock */ + /* blocks available from map->m_lblk */ return allocated; } } @@ -2829,12 +2848,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, */ if (ex1 && ex1 != ex) { ex1 = ex; - ex1->ee_len = 
cpu_to_le16(iblock - ee_block); + ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } - /* ex2: iblock to iblock + maxblocks-1 : initialised */ - ex2->ee_block = cpu_to_le32(iblock); + /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */ + ex2->ee_block = cpu_to_le32(map->m_lblk); ext4_ext_store_pblock(ex2, newblock); ex2->ee_len = cpu_to_le16(allocated); if (ex2 != ex) @@ -2877,7 +2896,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, goto out; insert: err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); - if (err == -ENOSPC) { + if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -2904,7 +2923,7 @@ fix_extent_len: } /* - * This function is called by ext4_ext_get_blocks() from + * This function is called by ext4_ext_map_blocks() from * ext4_get_blocks_dio_write() when DIO to write * to an uninitialized extent. * @@ -2927,9 +2946,8 @@ fix_extent_len: */ static int ext4_split_unwritten_extents(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, struct ext4_ext_path *path, - ext4_lblk_t iblock, - unsigned int max_blocks, int flags) { struct ext4_extent *ex, newex, orig_ex; @@ -2937,41 +2955,55 @@ static int ext4_split_unwritten_extents(handle_t *handle, struct ext4_extent *ex2 = NULL; struct ext4_extent *ex3 = NULL; struct ext4_extent_header *eh; - ext4_lblk_t ee_block; + ext4_lblk_t ee_block, eof_block; unsigned int allocated, ee_len, depth; ext4_fsblk_t newblock; int err = 0; + int may_zeroout; + + ext_debug("ext4_split_unwritten_extents: inode %lu, logical" + "block %llu, max_blocks %u\n", inode->i_ino, + (unsigned long long)map->m_lblk, map->m_len); + + eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> + inode->i_sb->s_blocksize_bits; + if (eof_block < map->m_lblk + map->m_len) + eof_block = map->m_lblk + map->m_len; - ext_debug("ext4_split_unwritten_extents: inode %lu," - "iblock %llu, max_blocks %u\n", inode->i_ino, - (unsigned long long)iblock, max_blocks); depth = ext_depth(inode); eh = path[depth].p_hdr; ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); - allocated = ee_len - (iblock - ee_block); - newblock = iblock - ee_block + ext_pblock(ex); + allocated = ee_len - (map->m_lblk - ee_block); + newblock = map->m_lblk - ee_block + ext_pblock(ex); + ex2 = ex; orig_ex.ee_block = ex->ee_block; orig_ex.ee_len = cpu_to_le16(ee_len); ext4_ext_store_pblock(&orig_ex, ext_pblock(ex)); + /* + * It is safe to convert extent to initialized via explicit + * zeroout only if extent is fully insde i_size or new_size. + */ + may_zeroout = ee_block + ee_len <= eof_block; + /* * If the uninitialized extent begins at the same logical * block where the write begins, and the write completely * covers the extent, then we don't need to split it. 
*/ - if ((iblock == ee_block) && (allocated <= max_blocks)) + if ((map->m_lblk == ee_block) && (allocated <= map->m_len)) return allocated; err = ext4_ext_get_access(handle, inode, path + depth); if (err) goto out; - /* ex1: ee_block to iblock - 1 : uninitialized */ - if (iblock > ee_block) { + /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ + if (map->m_lblk > ee_block) { ex1 = ex; - ex1->ee_len = cpu_to_le16(iblock - ee_block); + ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } @@ -2980,18 +3012,18 @@ static int ext4_split_unwritten_extents(handle_t *handle, * we insert ex3, if ex1 is NULL. This is to avoid temporary * overlap of blocks. */ - if (!ex1 && allocated > max_blocks) - ex2->ee_len = cpu_to_le16(max_blocks); + if (!ex1 && allocated > map->m_len) + ex2->ee_len = cpu_to_le16(map->m_len); /* ex3: to ee_block + ee_len : uninitialised */ - if (allocated > max_blocks) { + if (allocated > map->m_len) { unsigned int newdepth; ex3 = &newex; - ex3->ee_block = cpu_to_le32(iblock + max_blocks); - ext4_ext_store_pblock(ex3, newblock + max_blocks); - ex3->ee_len = cpu_to_le16(allocated - max_blocks); + ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); + ext4_ext_store_pblock(ex3, newblock + map->m_len); + ex3->ee_len = cpu_to_le16(allocated - map->m_len); ext4_ext_mark_uninitialized(ex3); err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); - if (err == -ENOSPC) { + if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -3001,7 +3033,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, ext4_ext_store_pblock(ex, ext_pblock(&orig_ex)); ext4_ext_dirty(handle, inode, path + depth); /* zeroed the full extent */ - /* blocks available from iblock */ + /* blocks available from map->m_lblk */ return allocated; } else if (err) @@ -3015,11 +3047,13 @@ static int ext4_split_unwritten_extents(handle_t *handle, * update the extent length after successful insert of the * split extent */ - orig_ex.ee_len = cpu_to_le16(ee_len - - ext4_ext_get_actual_len(ex3)); + ee_len -= ext4_ext_get_actual_len(ex3); + orig_ex.ee_len = cpu_to_le16(ee_len); + may_zeroout = ee_block + ee_len <= eof_block; + depth = newdepth; ext4_ext_drop_refs(path); - path = ext4_ext_find_extent(inode, iblock, path); + path = ext4_ext_find_extent(inode, map->m_lblk, path); if (IS_ERR(path)) { err = PTR_ERR(path); goto out; @@ -3033,7 +3067,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, if (err) goto out; - allocated = max_blocks; + allocated = map->m_len; } /* * If there was a change of depth as part of the @@ -3042,15 +3076,15 @@ static int ext4_split_unwritten_extents(handle_t *handle, */ if (ex1 && ex1 != ex) { ex1 = ex; - ex1->ee_len = cpu_to_le16(iblock - ee_block); + ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); ext4_ext_mark_uninitialized(ex1); ex2 = &newex; } /* - * ex2: iblock to iblock + maxblocks-1 : to be direct IO written, - * uninitialised still. + * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written + * using direct I/O, uninitialised still. 
*/ - ex2->ee_block = cpu_to_le32(iblock); + ex2->ee_block = cpu_to_le32(map->m_lblk); ext4_ext_store_pblock(ex2, newblock); ex2->ee_len = cpu_to_le16(allocated); ext4_ext_mark_uninitialized(ex2); @@ -3062,7 +3096,7 @@ static int ext4_split_unwritten_extents(handle_t *handle, goto out; insert: err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); - if (err == -ENOSPC) { + if (err == -ENOSPC && may_zeroout) { err = ext4_ext_zeroout(inode, &orig_ex); if (err) goto fix_extent_len; @@ -3152,10 +3186,9 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev, static int ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, unsigned int max_blocks, + struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags, - unsigned int allocated, struct buffer_head *bh_result, - ext4_fsblk_t newblock) + unsigned int allocated, ext4_fsblk_t newblock) { int ret = 0; int err = 0; @@ -3163,15 +3196,14 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" "block %llu, max_blocks %u, flags %d, allocated %u", - inode->i_ino, (unsigned long long)iblock, max_blocks, + inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, flags, allocated); ext4_ext_show_leaf(inode, path); /* get_block() before submit the IO, split the extent */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { - ret = ext4_split_unwritten_extents(handle, - inode, path, iblock, - max_blocks, flags); + ret = ext4_split_unwritten_extents(handle, inode, map, + path, flags); /* * Flag the inode(non aio case) or end_io struct (aio case) * that this IO needs to convertion to written when IO is @@ -3182,7 +3214,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); if (ext4_should_dioread_nolock(inode)) - set_buffer_uninit(bh_result); + map->m_flags |= EXT4_MAP_UNINIT; goto out; } /* IO end_io complete, convert the filled extent to written */ @@ -3210,14 +3242,12 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, * the buffer head will be unmapped so that * a read from the block returns 0s. */ - set_buffer_unwritten(bh_result); + map->m_flags |= EXT4_MAP_UNWRITTEN; goto out1; } /* buffered write, writepage time, convert*/ - ret = ext4_ext_convert_to_initialized(handle, inode, - path, iblock, - max_blocks); + ret = ext4_ext_convert_to_initialized(handle, inode, map, path); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out: @@ -3226,7 +3256,7 @@ out: goto out2; } else allocated = ret; - set_buffer_new(bh_result); + map->m_flags |= EXT4_MAP_NEW; /* * if we allocated more blocks than requested * we need to make sure we unmap the extra block @@ -3234,11 +3264,11 @@ out: * unmapped later when we find the buffer_head marked * new. 
*/ - if (allocated > max_blocks) { + if (allocated > map->m_len) { unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, - newblock + max_blocks, - allocated - max_blocks); - allocated = max_blocks; + newblock + map->m_len, + allocated - map->m_len); + allocated = map->m_len; } /* @@ -3252,13 +3282,13 @@ out: ext4_da_update_reserve_space(inode, allocated, 0); map_out: - set_buffer_mapped(bh_result); + map->m_flags |= EXT4_MAP_MAPPED; out1: - if (allocated > max_blocks) - allocated = max_blocks; + if (allocated > map->m_len) + allocated = map->m_len; ext4_ext_show_leaf(inode, path); - bh_result->b_bdev = inode->i_sb->s_bdev; - bh_result->b_blocknr = newblock; + map->m_pblk = newblock; + map->m_len = allocated; out2: if (path) { ext4_ext_drop_refs(path); @@ -3284,26 +3314,23 @@ out2: * * return < 0, error case. */ -int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, - unsigned int max_blocks, struct buffer_head *bh_result, - int flags) +int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags) { struct ext4_ext_path *path = NULL; struct ext4_extent_header *eh; struct ext4_extent newex, *ex, *last_ex; ext4_fsblk_t newblock; - int err = 0, depth, ret, cache_type; + int i, err = 0, depth, ret, cache_type; unsigned int allocated = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; - __clear_bit(BH_New, &bh_result->b_state); ext_debug("blocks %u/%u requested for inode %lu\n", - iblock, max_blocks, inode->i_ino); + map->m_lblk, map->m_len, inode->i_ino); /* check in cache */ - cache_type = ext4_ext_in_cache(inode, iblock, &newex); + cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); if (cache_type) { if (cache_type == EXT4_EXT_CACHE_GAP) { if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { @@ -3316,12 +3343,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, /* we should allocate requested block */ } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { /* block is already allocated */ - newblock = iblock + newblock = map->m_lblk - le32_to_cpu(newex.ee_block) + ext_pblock(&newex); /* number of remaining blocks in the extent */ allocated = ext4_ext_get_actual_len(&newex) - - (iblock - le32_to_cpu(newex.ee_block)); + (map->m_lblk - le32_to_cpu(newex.ee_block)); goto out; } else { BUG(); @@ -3329,7 +3356,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, } /* find extent for this block */ - path = ext4_ext_find_extent(inode, iblock, NULL); + path = ext4_ext_find_extent(inode, map->m_lblk, NULL); if (IS_ERR(path)) { err = PTR_ERR(path); path = NULL; @@ -3345,8 +3372,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, */ if (unlikely(path[depth].p_ext == NULL && depth != 0)) { EXT4_ERROR_INODE(inode, "bad extent address " - "iblock: %d, depth: %d pblock %lld", - iblock, depth, path[depth].p_block); + "lblock: %lu, depth: %d pblock %lld", + (unsigned long) map->m_lblk, depth, + path[depth].p_block); err = -EIO; goto out2; } @@ -3364,12 +3392,12 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, */ ee_len = ext4_ext_get_actual_len(ex); /* if found extent covers block, simply return it */ - if (in_range(iblock, ee_block, ee_len)) { - newblock = iblock - ee_block + ee_start; + if (in_range(map->m_lblk, ee_block, ee_len)) { + newblock = map->m_lblk - ee_block + ee_start; /* number of remaining blocks in the extent */ - allocated = ee_len - (iblock - ee_block); - ext_debug("%u fit into %u:%d -> %llu\n", iblock, - ee_block, ee_len, 
newblock); + allocated = ee_len - (map->m_lblk - ee_block); + ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, + ee_block, ee_len, newblock); /* Do not put uninitialized extent in the cache */ if (!ext4_ext_is_uninitialized(ex)) { @@ -3379,8 +3407,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, goto out; } ret = ext4_ext_handle_uninitialized_extents(handle, - inode, iblock, max_blocks, path, - flags, allocated, bh_result, newblock); + inode, map, path, flags, allocated, + newblock); return ret; } } @@ -3394,7 +3422,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, * put just found gap into cache to speed up * subsequent requests */ - ext4_ext_put_gap_in_cache(inode, path, iblock); + ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); goto out2; } /* @@ -3402,11 +3430,11 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, */ /* find neighbour allocated blocks */ - ar.lleft = iblock; + ar.lleft = map->m_lblk; err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); if (err) goto out2; - ar.lright = iblock; + ar.lright = map->m_lblk; err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright); if (err) goto out2; @@ -3417,26 +3445,26 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is * EXT_UNINIT_MAX_LEN. */ - if (max_blocks > EXT_INIT_MAX_LEN && + if (map->m_len > EXT_INIT_MAX_LEN && !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) - max_blocks = EXT_INIT_MAX_LEN; - else if (max_blocks > EXT_UNINIT_MAX_LEN && + map->m_len = EXT_INIT_MAX_LEN; + else if (map->m_len > EXT_UNINIT_MAX_LEN && (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) - max_blocks = EXT_UNINIT_MAX_LEN; + map->m_len = EXT_UNINIT_MAX_LEN; - /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */ - newex.ee_block = cpu_to_le32(iblock); - newex.ee_len = cpu_to_le16(max_blocks); + /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ + newex.ee_block = cpu_to_le32(map->m_lblk); + newex.ee_len = cpu_to_le16(map->m_len); err = ext4_ext_check_overlap(inode, &newex, path); if (err) allocated = ext4_ext_get_actual_len(&newex); else - allocated = max_blocks; + allocated = map->m_len; /* allocate new block */ ar.inode = inode; - ar.goal = ext4_ext_find_goal(inode, path, iblock); - ar.logical = iblock; + ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); + ar.logical = map->m_lblk; ar.len = allocated; if (S_ISREG(inode->i_mode)) ar.flags = EXT4_MB_HINT_DATA; @@ -3470,21 +3498,33 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, EXT4_STATE_DIO_UNWRITTEN); } if (ext4_should_dioread_nolock(inode)) - set_buffer_uninit(bh_result); + map->m_flags |= EXT4_MAP_UNINIT; } - if (unlikely(EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) { + if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) { if (unlikely(!eh->eh_entries)) { EXT4_ERROR_INODE(inode, - "eh->eh_entries == 0 ee_block %d", - ex->ee_block); + "eh->eh_entries == 0 and " + "EOFBLOCKS_FL set"); err = -EIO; goto out2; } last_ex = EXT_LAST_EXTENT(eh); - if (iblock + ar.len > le32_to_cpu(last_ex->ee_block) - + ext4_ext_get_actual_len(last_ex)) - EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; + /* + * If the current leaf block was reached by looking at + * the last index block all the way down the tree, and + * we are extending the inode beyond the last extent + * in the current leaf block, then clear the + * EOFBLOCKS_FL flag. 
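
/*
 * Sketch of the rightmost-leaf test described in the comment above and
 * implemented just below (simplified userspace model; the path/index types
 * are stand-ins for the kernel's ext4_ext_path): EOFBLOCKS may only be
 * cleared when the leaf was reached via the last index at every level,
 * i.e. it really is the rightmost leaf of the extent tree.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_path_level {
        int p_idx;      /* index followed at this level */
        int p_last;     /* last valid index at this level */
};

static bool in_rightmost_leaf(const struct toy_path_level *path, int depth)
{
        int i;

        for (i = depth - 1; i >= 0; i--) {
                if (path[i].p_idx != path[i].p_last)
                        return false;
        }
        return true;    /* every level sat on its last index */
}

int main(void)
{
        struct toy_path_level rightmost[] = { {3, 3}, {7, 7}, {5, 5} };
        struct toy_path_level interior[]  = { {3, 3}, {7, 7}, {2, 5} };

        printf("%d %d\n", in_rightmost_leaf(rightmost, 3),
               in_rightmost_leaf(interior, 3));        /* prints: 1 0 */
        return 0;
}
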
+ */ + for (i = depth-1; i >= 0; i--) { + if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) + break; + } + if ((i < 0) && + (map->m_lblk + ar.len > le32_to_cpu(last_ex->ee_block) + + ext4_ext_get_actual_len(last_ex))) + ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); if (err) { @@ -3500,9 +3540,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, /* previous routine could use block we allocated */ newblock = ext_pblock(&newex); allocated = ext4_ext_get_actual_len(&newex); - if (allocated > max_blocks) - allocated = max_blocks; - set_buffer_new(bh_result); + if (allocated > map->m_len) + allocated = map->m_len; + map->m_flags |= EXT4_MAP_NEW; /* * Update reserved blocks/metadata blocks after successful @@ -3516,18 +3556,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, * when it is _not_ an uninitialized extent. */ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { - ext4_ext_put_in_cache(inode, iblock, allocated, newblock, + ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, EXT4_EXT_CACHE_EXTENT); ext4_update_inode_fsync_trans(handle, inode, 1); } else ext4_update_inode_fsync_trans(handle, inode, 0); out: - if (allocated > max_blocks) - allocated = max_blocks; + if (allocated > map->m_len) + allocated = map->m_len; ext4_ext_show_leaf(inode, path); - set_buffer_mapped(bh_result); - bh_result->b_bdev = inode->i_sb->s_bdev; - bh_result->b_blocknr = newblock; + map->m_flags |= EXT4_MAP_MAPPED; + map->m_pblk = newblock; + map->m_len = allocated; out2: if (path) { ext4_ext_drop_refs(path); @@ -3625,7 +3665,7 @@ static void ext4_falloc_update_inode(struct inode *inode, * can proceed even if the new size is the same as i_size. */ if (new_size > i_size_read(inode)) - EXT4_I(inode)->i_flags |= EXT4_EOFBLOCKS_FL; + ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); } } @@ -3640,55 +3680,57 @@ static void ext4_falloc_update_inode(struct inode *inode, long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) { handle_t *handle; - ext4_lblk_t block; loff_t new_size; unsigned int max_blocks; int ret = 0; int ret2 = 0; int retries = 0; - struct buffer_head map_bh; + struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; /* * currently supporting (pre)allocate mode for extent-based * files _only_ */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EOPNOTSUPP; /* preallocation to directories is currently not supported */ if (S_ISDIR(inode->i_mode)) return -ENODEV; - block = offset >> blkbits; + map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - - block; + - map.m_lblk; /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, max_blocks); mutex_lock(&inode->i_mutex); + ret = inode_newsize_ok(inode, (len + offset)); + if (ret) { + mutex_unlock(&inode->i_mutex); + return ret; + } retry: while (ret >= 0 && ret < max_blocks) { - block = block + ret; - max_blocks = max_blocks - ret; + map.m_lblk = map.m_lblk + ret; + map.m_len = max_blocks = max_blocks - ret; handle = ext4_journal_start(inode, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } - map_bh.b_state = 0; - ret = ext4_get_blocks(handle, inode, block, - max_blocks, &map_bh, + ret = ext4_map_blocks(handle, inode, &map, 
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); if (ret <= 0) { #ifdef EXT4FS_DEBUG WARN_ON(ret <= 0); - printk(KERN_ERR "%s: ext4_ext_get_blocks " + printk(KERN_ERR "%s: ext4_ext_map_blocks " "returned error inode#%lu, block=%u, " "max_blocks=%u", __func__, inode->i_ino, block, max_blocks); @@ -3697,14 +3739,14 @@ retry: ret2 = ext4_journal_stop(handle); break; } - if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len, + if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, blkbits) >> blkbits)) new_size = offset + len; else - new_size = (block + ret) << blkbits; + new_size = (map.m_lblk + ret) << blkbits; ext4_falloc_update_inode(inode, mode, new_size, - buffer_new(&map_bh)); + (map.m_flags & EXT4_MAP_NEW)); ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); if (ret2) @@ -3733,42 +3775,39 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, ssize_t len) { handle_t *handle; - ext4_lblk_t block; unsigned int max_blocks; int ret = 0; int ret2 = 0; - struct buffer_head map_bh; + struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; - block = offset >> blkbits; + map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because * If blocksize = 4096 offset = 3072 and len = 2048 */ - max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - - block; + max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - + map.m_lblk); /* * credits to insert 1 extent into extent tree */ credits = ext4_chunk_trans_blocks(inode, max_blocks); while (ret >= 0 && ret < max_blocks) { - block = block + ret; - max_blocks = max_blocks - ret; + map.m_lblk += ret; + map.m_len = (max_blocks -= ret); handle = ext4_journal_start(inode, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); break; } - map_bh.b_state = 0; - ret = ext4_get_blocks(handle, inode, block, - max_blocks, &map_bh, + ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_IO_CONVERT_EXT); if (ret <= 0) { WARN_ON(ret <= 0); - printk(KERN_ERR "%s: ext4_ext_get_blocks " + printk(KERN_ERR "%s: ext4_ext_map_blocks " "returned error inode#%lu, block=%u, " "max_blocks=%u", __func__, - inode->i_ino, block, max_blocks); + inode->i_ino, map.m_lblk, map.m_len); } ext4_mark_inode_dirty(handle, inode); ret2 = ext4_journal_stop(handle); @@ -3898,7 +3937,7 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int error = 0; /* fallback to generic here if not in extents fmt */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return generic_block_fiemap(inode, fieinfo, start, len, ext4_get_block); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index d0776e410f34..5313ae4cda2d 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -66,7 +66,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, * is smaller than s_maxbytes, which is for extent-mapped files. */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); size_t length = iov_length(iov, nr_segs); diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index ef3d980e67cb..592adf2e546e 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -34,6 +34,29 @@ #include +/* + * If we're not journaling and this is a just-created file, we have to + * sync our parent directory (if it was freshly created) since + * otherwise it will only be written by writeback, leaving a huge + * window during which a crash may lose the file. 
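
/*
 * Worked example (plain C, not kernel code) of the byte-range to block-range
 * conversion used above by ext4_fallocate() and
 * ext4_convert_unwritten_extents(): the length alone cannot be turned into a
 * block count because the range may straddle block boundaries on both ends,
 * which is exactly the blocksize=4096, offset=3072, len=2048 case cited in
 * the comment.
 */
#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 12;              /* 4096-byte blocks */
        unsigned long long offset = 3072, len = 2048;
        unsigned long long lblk, last, nblocks;

        lblk = offset >> blkbits;                                 /* 0 */
        last = (offset + len + (1ULL << blkbits) - 1) >> blkbits; /* 2 */
        nblocks = last - lblk;          /* 2 blocks, not len >> blkbits == 0 */

        printf("m_lblk=%llu m_len=%llu\n", lblk, nblocks);
        return 0;
}
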
This may apply for + * the parent directory's parent as well, and so on recursively, if + * they are also freshly created. + */ +static void ext4_sync_parent(struct inode *inode) +{ + struct dentry *dentry = NULL; + + while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { + ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); + dentry = list_entry(inode->i_dentry.next, + struct dentry, d_alias); + if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode) + break; + inode = dentry->d_parent->d_inode; + sync_mapping_buffers(inode->i_mapping); + } +} + /* * akpm: A new design for ext4_sync_file(). * @@ -48,9 +71,9 @@ * i_mutex lock is held when entering and exiting this function */ -int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) +int ext4_sync_file(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; int ret; @@ -58,7 +81,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) J_ASSERT(ext4_journal_current_handle() == NULL); - trace_ext4_sync_file(file, dentry, datasync); + trace_ext4_sync_file(file, datasync); if (inode->i_sb->s_flags & MS_RDONLY) return 0; @@ -66,9 +89,13 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) ret = flush_completed_IO(inode); if (ret < 0) return ret; - - if (!journal) - return simple_fsync(file, dentry, datasync); + + if (!journal) { + ret = generic_file_fsync(file, datasync); + if (!ret && !list_empty(&inode->i_dentry)) + ext4_sync_parent(inode); + return ret; + } /* * data=writeback,ordered: @@ -102,7 +129,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) (journal->j_flags & JBD2_BARRIER)) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT); - jbd2_log_wait_commit(journal, commit_tid); + ret = jbd2_log_wait_commit(journal, commit_tid); } else if (journal->j_flags & JBD2_BARRIER) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT); diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 1a0e183a2f04..25c4b3173fd9 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) if (fatal) goto error_return; - /* Ok, now we can actually update the inode bitmaps.. 
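
/*
 * Simplified model (userspace sketch; the dentry and inode plumbing are
 * stand-ins) of what ext4_sync_parent() above does: walk up through freshly
 * created parent directories, clearing the "new entry" mark and syncing each
 * one, so a crash cannot lose the just-created file because an ancestor
 * directory was never written out.
 */
#include <stdbool.h>
#include <stddef.h>

struct toy_inode {
        bool new_entry;                 /* EXT4_STATE_NEWENTRY stand-in */
        struct toy_inode *parent;       /* parent directory, NULL at the root */
};

static void sync_one(struct toy_inode *dir)
{
        (void)dir;      /* stand-in for sync_mapping_buffers() */
}

static void sync_parents(struct toy_inode *inode)
{
        while (inode && inode->new_entry) {
                inode->new_entry = false;
                if (!inode->parent)
                        break;
                inode = inode->parent;
                sync_one(inode);
        }
}

int main(void)
{
        struct toy_inode root = { false, NULL };
        struct toy_inode dir  = { true, &root };        /* freshly created dir  */
        struct toy_inode file = { true, &dir };         /* freshly created file */

        sync_parents(&file);
        return dir.new_entry ? 1 : 0;   /* 0: the new directory was synced */
}
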
*/ - cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), - bit, bitmap_bh->b_data); - if (!cleared) - ext4_error(sb, "bit already cleared for inode %lu", ino); - else { - gdp = ext4_get_group_desc(sb, block_group, &bh2); - + fatal = -ESRCH; + gdp = ext4_get_group_desc(sb, block_group, &bh2); + if (gdp) { BUFFER_TRACE(bh2, "get_write_access"); fatal = ext4_journal_get_write_access(handle, bh2); - if (fatal) goto error_return; - - if (gdp) { - ext4_lock_group(sb, block_group); - count = ext4_free_inodes_count(sb, gdp) + 1; - ext4_free_inodes_set(sb, gdp, count); - if (is_directory) { - count = ext4_used_dirs_count(sb, gdp) - 1; - ext4_used_dirs_set(sb, gdp, count); - if (sbi->s_log_groups_per_flex) { - ext4_group_t f; - - f = ext4_flex_group(sbi, block_group); - atomic_dec(&sbi->s_flex_groups[f].used_dirs); - } + } + ext4_lock_group(sb, block_group); + cleared = ext4_clear_bit(bit, bitmap_bh->b_data); + if (fatal || !cleared) { + ext4_unlock_group(sb, block_group); + goto out; + } - } - gdp->bg_checksum = ext4_group_desc_csum(sbi, - block_group, gdp); - ext4_unlock_group(sb, block_group); - percpu_counter_inc(&sbi->s_freeinodes_counter); - if (is_directory) - percpu_counter_dec(&sbi->s_dirs_counter); - - if (sbi->s_log_groups_per_flex) { - ext4_group_t f; - - f = ext4_flex_group(sbi, block_group); - atomic_inc(&sbi->s_flex_groups[f].free_inodes); - } - } - BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, NULL, bh2); - if (!fatal) fatal = err; + count = ext4_free_inodes_count(sb, gdp) + 1; + ext4_free_inodes_set(sb, gdp, count); + if (is_directory) { + count = ext4_used_dirs_count(sb, gdp) - 1; + ext4_used_dirs_set(sb, gdp, count); + percpu_counter_dec(&sbi->s_dirs_counter); } - BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); - if (!fatal) - fatal = err; - sb->s_dirt = 1; + gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); + ext4_unlock_group(sb, block_group); + + percpu_counter_inc(&sbi->s_freeinodes_counter); + if (sbi->s_log_groups_per_flex) { + ext4_group_t f = ext4_flex_group(sbi, block_group); + + atomic_inc(&sbi->s_flex_groups[f].free_inodes); + if (is_directory) + atomic_dec(&sbi->s_flex_groups[f].used_dirs); + } + BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata"); + fatal = ext4_handle_dirty_metadata(handle, NULL, bh2); +out: + if (cleared) { + BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); + if (!fatal) + fatal = err; + sb->s_dirt = 1; + } else + ext4_error(sb, "bit already cleared for inode %lu", ino); + error_return: brelse(bitmap_bh); ext4_std_error(sb, fatal); @@ -499,7 +492,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent, if (S_ISDIR(mode) && ((parent == sb->s_root->d_inode) || - (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) { + (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) { int best_ndir = inodes_per_group; int ret = -1; @@ -1041,7 +1034,7 @@ got: if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { /* set extent flag only for directory, file and normal symlink*/ if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) { - EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL; + ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); ext4_ext_tree_init(handle, inode); } } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3e0f6af9d08d..19df61c321fd 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -149,7 +149,7 @@ int 
ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, int ret; /* - * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this + * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this * moment, get_block can be called only for blocks inside i_size since * page cache has been already dropped and writes are blocked by * i_mutex. So we can safely drop the i_data_sem here. @@ -348,9 +348,8 @@ static int __ext4_check_blockref(const char *function, struct inode *inode, if (blk && unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), blk, 1))) { - __ext4_error(inode->i_sb, function, - "invalid block reference %u " - "in inode #%lu", blk, inode->i_ino); + ext4_error_inode(function, inode, + "invalid block reference %u", blk); return -EIO; } } @@ -785,7 +784,7 @@ failed: /* Allocation failed, free what we already allocated */ ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0); for (i = 1; i <= n ; i++) { - /* + /* * branch[i].bh is newly allocated, so there is no * need to revoke the block, which is why we don't * need to set EXT4_FREE_BLOCKS_METADATA. @@ -875,7 +874,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode, err_out: for (i = 1; i <= num; i++) { - /* + /* * branch[i].bh is newly allocated, so there is no * need to revoke the block, which is why we don't * need to set EXT4_FREE_BLOCKS_METADATA. @@ -890,9 +889,9 @@ err_out: } /* - * The ext4_ind_get_blocks() function handles non-extents inodes + * The ext4_ind_map_blocks() function handles non-extents inodes * (i.e., using the traditional indirect/double-indirect i_blocks - * scheme) for ext4_get_blocks(). + * scheme) for ext4_map_blocks(). * * Allocation strategy is simple: if we have to allocate something, we will * have to go the whole way to leaf. So let's do it before attaching anything @@ -917,9 +916,8 @@ err_out: * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system * blocks. */ -static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, - ext4_lblk_t iblock, unsigned int maxblocks, - struct buffer_head *bh_result, +static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags) { int err = -EIO; @@ -933,9 +931,9 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, int count = 0; ext4_fsblk_t first_block = 0; - J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)); + J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); - depth = ext4_block_to_path(inode, iblock, offsets, + depth = ext4_block_to_path(inode, map->m_lblk, offsets, &blocks_to_boundary); if (depth == 0) @@ -946,10 +944,9 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, /* Simplest case - block found, no allocation needed */ if (!partial) { first_block = le32_to_cpu(chain[depth - 1].key); - clear_buffer_new(bh_result); count++; /*map more blocks*/ - while (count < maxblocks && count <= blocks_to_boundary) { + while (count < map->m_len && count <= blocks_to_boundary) { ext4_fsblk_t blk; blk = le32_to_cpu(*(chain[depth-1].p + count)); @@ -969,7 +966,7 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, /* * Okay, we need to do block allocation. 
*/ - goal = ext4_find_goal(inode, iblock, partial); + goal = ext4_find_goal(inode, map->m_lblk, partial); /* the number of blocks need to allocate for [d,t]indirect blocks */ indirect_blks = (chain + depth) - partial - 1; @@ -979,11 +976,11 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, * direct blocks to allocate for this branch. */ count = ext4_blks_to_allocate(partial, indirect_blks, - maxblocks, blocks_to_boundary); + map->m_len, blocks_to_boundary); /* * Block out ext4_truncate while we alter the tree */ - err = ext4_alloc_branch(handle, inode, iblock, indirect_blks, + err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, &count, goal, offsets + (partial - chain), partial); @@ -995,18 +992,20 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, * may need to return -EAGAIN upwards in the worst case. --sct */ if (!err) - err = ext4_splice_branch(handle, inode, iblock, + err = ext4_splice_branch(handle, inode, map->m_lblk, partial, indirect_blks, count); if (err) goto cleanup; - set_buffer_new(bh_result); + map->m_flags |= EXT4_MAP_NEW; ext4_update_inode_fsync_trans(handle, inode, 1); got_it: - map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); + map->m_flags |= EXT4_MAP_MAPPED; + map->m_pblk = le32_to_cpu(chain[depth-1].key); + map->m_len = count; if (count > blocks_to_boundary) - set_buffer_boundary(bh_result); + map->m_flags |= EXT4_MAP_BOUNDARY; err = count; /* Clean up and exit */ partial = chain + depth - 1; /* the whole chain */ @@ -1016,7 +1015,6 @@ cleanup: brelse(partial->bh); partial--; } - BUFFER_TRACE(bh_result, "returned"); out: return err; } @@ -1061,7 +1059,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode, */ static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock) { - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return ext4_ext_calc_metadata_amount(inode, lblock); return ext4_indirect_calc_metadata_amount(inode, lblock); @@ -1076,7 +1074,6 @@ void ext4_da_update_reserve_space(struct inode *inode, { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); - int mdb_free = 0, allocated_meta_blocks = 0; spin_lock(&ei->i_block_reservation_lock); trace_ext4_da_update_reserve_space(inode, used); @@ -1091,11 +1088,10 @@ void ext4_da_update_reserve_space(struct inode *inode, /* Update per-inode reservations */ ei->i_reserved_data_blocks -= used; - used += ei->i_allocated_meta_blocks; ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; - allocated_meta_blocks = ei->i_allocated_meta_blocks; + percpu_counter_sub(&sbi->s_dirtyblocks_counter, + used + ei->i_allocated_meta_blocks); ei->i_allocated_meta_blocks = 0; - percpu_counter_sub(&sbi->s_dirtyblocks_counter, used); if (ei->i_reserved_data_blocks == 0) { /* @@ -1103,30 +1099,23 @@ void ext4_da_update_reserve_space(struct inode *inode, * only when we have written all of the delayed * allocation blocks. 
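
/*
 * Toy model (not kernel code; field names only echo the kernel's) of the
 * delayed-allocation accounting being reworked in
 * ext4_da_update_reserve_space() here: per-inode reservations for data and
 * estimated metadata are consumed as blocks are really allocated, and the
 * leftover metadata estimate is released only once every reserved data block
 * has been written out.
 */
#include <stdio.h>

struct toy_resv {
        unsigned int reserved_data;     /* i_reserved_data_blocks  */
        unsigned int reserved_meta;     /* i_reserved_meta_blocks  */
        unsigned int allocated_meta;    /* i_allocated_meta_blocks */
        unsigned long long sb_dirty;    /* s_dirtyblocks_counter   */
};

static void update_reserve_space(struct toy_resv *r, unsigned int used)
{
        r->reserved_data -= used;
        r->reserved_meta -= r->allocated_meta;
        r->sb_dirty -= used + r->allocated_meta;
        r->allocated_meta = 0;

        if (r->reserved_data == 0) {
                /* all delalloc data written: drop the leftover estimate */
                r->sb_dirty -= r->reserved_meta;
                r->reserved_meta = 0;
        }
}

int main(void)
{
        struct toy_resv r = { 10, 4, 1, 100 };  /* 10 data + 4 meta reserved */

        update_reserve_space(&r, 10);           /* last 10 data blocks written */
        printf("sb dirty blocks left: %llu\n", r.sb_dirty);    /* 86 */
        return 0;
}
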
*/ - mdb_free = ei->i_reserved_meta_blocks; + percpu_counter_sub(&sbi->s_dirtyblocks_counter, + ei->i_reserved_meta_blocks); ei->i_reserved_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; - percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); } spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); - /* Update quota subsystem */ - if (quota_claim) { + /* Update quota subsystem for data blocks */ + if (quota_claim) dquot_claim_block(inode, used); - if (mdb_free) - dquot_release_reservation_block(inode, mdb_free); - } else { + else { /* * We did fallocate with an offset that is already delayed * allocated. So on delayed allocated writeback we should - * not update the quota for allocated blocks. But then - * converting an fallocate region to initialized region would - * have caused a metadata allocation. So claim quota for - * that + * not re-claim the quota for fallocated blocks. */ - if (allocated_meta_blocks) - dquot_claim_block(inode, allocated_meta_blocks); - dquot_release_reservation_block(inode, mdb_free + used); + dquot_release_reservation_block(inode, used); } /* @@ -1139,15 +1128,15 @@ void ext4_da_update_reserve_space(struct inode *inode, ext4_discard_preallocations(inode); } -static int check_block_validity(struct inode *inode, const char *msg, - sector_t logical, sector_t phys, int len) +static int check_block_validity(struct inode *inode, const char *func, + struct ext4_map_blocks *map) { - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { - __ext4_error(inode->i_sb, msg, - "inode #%lu logical block %llu mapped to %llu " - "(size %d)", inode->i_ino, - (unsigned long long) logical, - (unsigned long long) phys, len); + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, + map->m_len)) { + ext4_error_inode(func, inode, + "lblock %lu mapped to illegal pblock %llu " + "(length %d)", (unsigned long) map->m_lblk, + map->m_pblk, map->m_len); return -EIO; } return 0; @@ -1212,15 +1201,15 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, } /* - * The ext4_get_blocks() function tries to look up the requested blocks, + * The ext4_map_blocks() function tries to look up the requested blocks, * and returns if the blocks are already mapped. * * Otherwise it takes the write lock of the i_data_sem and allocate blocks * and store the allocated blocks in the result buffer head and mark it * mapped. * - * If file type is extents based, it will call ext4_ext_get_blocks(), - * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping + * If file type is extents based, it will call ext4_ext_map_blocks(), + * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping * based files * * On success, it returns the number of blocks being mapped or allocate. @@ -1233,35 +1222,29 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, * * It returns the error in case of allocation failure. 
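
/*
 * Usage sketch for the structure-based interface described above (toy types;
 * the TOY_MAP_* values are illustrative, not the kernel's EXT4_MAP_* bits):
 * callers describe the request in one structure and get the mapping back in
 * the same structure, instead of passing a dummy buffer_head around. As in
 * the comment above, a positive return is the number of blocks mapped, 0
 * means a hole, and a negative value is an error.
 */
#include <stdio.h>

#define TOY_MAP_NEW     0x1
#define TOY_MAP_MAPPED  0x2

struct toy_map_blocks {
        unsigned long long m_pblk;      /* result: first physical block */
        unsigned int m_lblk;            /* request: first logical block */
        unsigned int m_len;             /* request: blocks wanted; result: blocks mapped */
        unsigned int m_flags;           /* result: TOY_MAP_* */
};

/* pretend mapping: logical block N lives at physical block N + 1000 */
static int toy_map_blocks(struct toy_map_blocks *map)
{
        map->m_pblk = map->m_lblk + 1000ULL;
        map->m_flags = TOY_MAP_MAPPED;
        return map->m_len;              /* number of blocks mapped */
}

int main(void)
{
        struct toy_map_blocks map = { .m_lblk = 42, .m_len = 8 };
        int ret = toy_map_blocks(&map);

        if (ret > 0 && (map.m_flags & TOY_MAP_MAPPED))
                printf("%u -> %llu, %d blocks\n", map.m_lblk, map.m_pblk, ret);
        return 0;
}
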
*/ -int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, - unsigned int max_blocks, struct buffer_head *bh, - int flags) +int ext4_map_blocks(handle_t *handle, struct inode *inode, + struct ext4_map_blocks *map, int flags) { int retval; - clear_buffer_mapped(bh); - clear_buffer_unwritten(bh); - - ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u," - "logical block %lu\n", inode->i_ino, flags, max_blocks, - (unsigned long)block); + map->m_flags = 0; + ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," + "logical block %lu\n", inode->i_ino, flags, map->m_len, + (unsigned long) map->m_lblk); /* * Try to see if we can get the block without requesting a new * file system block. */ down_read((&EXT4_I(inode)->i_data_sem)); - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { - retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, - bh, 0); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { + retval = ext4_ext_map_blocks(handle, inode, map, 0); } else { - retval = ext4_ind_get_blocks(handle, inode, block, max_blocks, - bh, 0); + retval = ext4_ind_map_blocks(handle, inode, map, 0); } up_read((&EXT4_I(inode)->i_data_sem)); - if (retval > 0 && buffer_mapped(bh)) { - int ret = check_block_validity(inode, "file system corruption", - block, bh->b_blocknr, retval); + if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { + int ret = check_block_validity(inode, __func__, map); if (ret != 0) return ret; } @@ -1277,7 +1260,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, * ext4_ext_get_block() returns th create = 0 * with buffer head unmapped. */ - if (retval > 0 && buffer_mapped(bh)) + if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) return retval; /* @@ -1290,7 +1273,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, * of BH_Unwritten and BH_Mapped flags being simultaneously * set on the buffer_head. */ - clear_buffer_unwritten(bh); + map->m_flags &= ~EXT4_MAP_UNWRITTEN; /* * New blocks allocate and/or writing to uninitialized extent @@ -1312,14 +1295,12 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, * We need to check for EXT4 here because migrate * could have changed the inode type in between */ - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { - retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, - bh, flags); + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { + retval = ext4_ext_map_blocks(handle, inode, map, flags); } else { - retval = ext4_ind_get_blocks(handle, inode, block, - max_blocks, bh, flags); + retval = ext4_ind_map_blocks(handle, inode, map, flags); - if (retval > 0 && buffer_new(bh)) { + if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { /* * We allocated new blocks which will result in * i_data's format changing. Force the migrate @@ -1342,10 +1323,10 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, EXT4_I(inode)->i_delalloc_reserved_flag = 0; up_write((&EXT4_I(inode)->i_data_sem)); - if (retval > 0 && buffer_mapped(bh)) { - int ret = check_block_validity(inode, "file system " - "corruption after allocation", - block, bh->b_blocknr, retval); + if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { + int ret = check_block_validity(inode, + "ext4_map_blocks_after_alloc", + map); if (ret != 0) return ret; } @@ -1355,109 +1336,109 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, /* Maximum number of blocks we map for direct IO at once. 
*/ #define DIO_MAX_BLOCKS 4096 -int ext4_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) +static int _ext4_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int flags) { handle_t *handle = ext4_journal_current_handle(); + struct ext4_map_blocks map; int ret = 0, started = 0; - unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; int dio_credits; - if (create && !handle) { + map.m_lblk = iblock; + map.m_len = bh->b_size >> inode->i_blkbits; + + if (flags && !handle) { /* Direct IO write... */ - if (max_blocks > DIO_MAX_BLOCKS) - max_blocks = DIO_MAX_BLOCKS; - dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); + if (map.m_len > DIO_MAX_BLOCKS) + map.m_len = DIO_MAX_BLOCKS; + dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); handle = ext4_journal_start(inode, dio_credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); - goto out; + return ret; } started = 1; } - ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, - create ? EXT4_GET_BLOCKS_CREATE : 0); + ret = ext4_map_blocks(handle, inode, &map, flags); if (ret > 0) { - bh_result->b_size = (ret << inode->i_blkbits); + map_bh(bh, inode->i_sb, map.m_pblk); + bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; + bh->b_size = inode->i_sb->s_blocksize * map.m_len; ret = 0; } if (started) ext4_journal_stop(handle); -out: return ret; } +int ext4_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + return _ext4_get_block(inode, iblock, bh, + create ? EXT4_GET_BLOCKS_CREATE : 0); +} + /* * `handle' can be NULL if create is zero */ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, ext4_lblk_t block, int create, int *errp) { - struct buffer_head dummy; + struct ext4_map_blocks map; + struct buffer_head *bh; int fatal = 0, err; - int flags = 0; J_ASSERT(handle != NULL || create == 0); - dummy.b_state = 0; - dummy.b_blocknr = -1000; - buffer_trace_init(&dummy.b_history); - if (create) - flags |= EXT4_GET_BLOCKS_CREATE; - err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags); - /* - * ext4_get_blocks() returns number of blocks mapped. 0 in - * case of a HOLE. - */ - if (err > 0) { - if (err > 1) - WARN_ON(1); - err = 0; + map.m_lblk = block; + map.m_len = 1; + err = ext4_map_blocks(handle, inode, &map, + create ? EXT4_GET_BLOCKS_CREATE : 0); + + if (err < 0) + *errp = err; + if (err <= 0) + return NULL; + *errp = 0; + + bh = sb_getblk(inode->i_sb, map.m_pblk); + if (!bh) { + *errp = -EIO; + return NULL; } - *errp = err; - if (!err && buffer_mapped(&dummy)) { - struct buffer_head *bh; - bh = sb_getblk(inode->i_sb, dummy.b_blocknr); - if (!bh) { - *errp = -EIO; - goto err; - } - if (buffer_new(&dummy)) { - J_ASSERT(create != 0); - J_ASSERT(handle != NULL); + if (map.m_flags & EXT4_MAP_NEW) { + J_ASSERT(create != 0); + J_ASSERT(handle != NULL); - /* - * Now that we do not always journal data, we should - * keep in mind whether this should always journal the - * new buffer as metadata. For now, regular file - * writes use ext4_get_block instead, so it's not a - * problem. 
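
/*
 * Sketch of the flag hand-off done in _ext4_get_block() above (toy bit
 * values; in the kernel the EXT4_MAP_* bits are defined to line up with the
 * corresponding BH_* state bits, which is what lets a single mask-and-or
 * copy the mapping flags into the buffer_head).
 */
#include <stddef.h>
#include <stdio.h>

#define TOY_BH_MAPPED   0x01
#define TOY_BH_NEW      0x02
#define TOY_MAP_FLAGS   (TOY_BH_MAPPED | TOY_BH_NEW)

struct toy_bh {
        unsigned long b_state;
        unsigned long long b_blocknr;
        size_t b_size;
};

static void fill_bh(struct toy_bh *bh, unsigned long long pblk,
                    unsigned int len, unsigned int m_flags,
                    unsigned int blocksize)
{
        bh->b_blocknr = pblk;
        /* clear only the mapping-related bits, then copy the new ones in */
        bh->b_state = (bh->b_state & ~(unsigned long)TOY_MAP_FLAGS) | m_flags;
        bh->b_size = (size_t)blocksize * len;
}

int main(void)
{
        struct toy_bh bh = { 0 };

        fill_bh(&bh, 1042, 8, TOY_BH_MAPPED | TOY_BH_NEW, 4096);
        printf("state=%#lx size=%zu\n", bh.b_state, bh.b_size);
        return 0;
}
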
- */ - lock_buffer(bh); - BUFFER_TRACE(bh, "call get_create_access"); - fatal = ext4_journal_get_create_access(handle, bh); - if (!fatal && !buffer_uptodate(bh)) { - memset(bh->b_data, 0, inode->i_sb->s_blocksize); - set_buffer_uptodate(bh); - } - unlock_buffer(bh); - BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - err = ext4_handle_dirty_metadata(handle, inode, bh); - if (!fatal) - fatal = err; - } else { - BUFFER_TRACE(bh, "not a new buffer"); - } - if (fatal) { - *errp = fatal; - brelse(bh); - bh = NULL; + /* + * Now that we do not always journal data, we should + * keep in mind whether this should always journal the + * new buffer as metadata. For now, regular file + * writes use ext4_get_block instead, so it's not a + * problem. + */ + lock_buffer(bh); + BUFFER_TRACE(bh, "call get_create_access"); + fatal = ext4_journal_get_create_access(handle, bh); + if (!fatal && !buffer_uptodate(bh)) { + memset(bh->b_data, 0, inode->i_sb->s_blocksize); + set_buffer_uptodate(bh); } - return bh; + unlock_buffer(bh); + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); + err = ext4_handle_dirty_metadata(handle, inode, bh); + if (!fatal) + fatal = err; + } else { + BUFFER_TRACE(bh, "not a new buffer"); } -err: - return NULL; + if (fatal) { + *errp = fatal; + brelse(bh); + bh = NULL; + } + return bh; } struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, @@ -1860,7 +1841,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) int retries = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); - unsigned long md_needed, md_reserved; + unsigned long md_needed; int ret; /* @@ -1870,22 +1851,24 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) */ repeat: spin_lock(&ei->i_block_reservation_lock); - md_reserved = ei->i_reserved_meta_blocks; md_needed = ext4_calc_metadata_amount(inode, lblock); trace_ext4_da_reserve_space(inode, md_needed); spin_unlock(&ei->i_block_reservation_lock); /* - * Make quota reservation here to prevent quota overflow - * later. Real quota accounting is done at pages writeout - * time. + * We will charge metadata quota at writeout time; this saves + * us from metadata over-estimation, though we may go over by + * a small amount in the end. Here we just reserve for data. */ - ret = dquot_reserve_block(inode, md_needed + 1); + ret = dquot_reserve_block(inode, 1); if (ret) return ret; - + /* + * We do still charge estimated metadata to the sb though; + * we cannot afford to run out of free blocks. + */ if (ext4_claim_free_blocks(sbi, md_needed + 1)) { - dquot_release_reservation_block(inode, md_needed + 1); + dquot_release_reservation_block(inode, 1); if (ext4_should_retry_alloc(inode->i_sb, &retries)) { yield(); goto repeat; @@ -1910,6 +1893,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free) spin_lock(&EXT4_I(inode)->i_block_reservation_lock); + trace_ext4_da_release_space(inode, to_free); if (unlikely(to_free > ei->i_reserved_data_blocks)) { /* * if there aren't enough reserved blocks, then the @@ -1932,12 +1916,13 @@ static void ext4_da_release_space(struct inode *inode, int to_free) * only when we have written all of the delayed * allocation blocks. 
*/ - to_free += ei->i_reserved_meta_blocks; + percpu_counter_sub(&sbi->s_dirtyblocks_counter, + ei->i_reserved_meta_blocks); ei->i_reserved_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; } - /* update fs dirty blocks counter */ + /* update fs dirty data blocks counter */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, to_free); spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); @@ -2042,28 +2027,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd) /* * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers * - * @mpd->inode - inode to walk through - * @exbh->b_blocknr - first block on a disk - * @exbh->b_size - amount of space in bytes - * @logical - first logical block to start assignment with - * * the function goes through all passed space and put actual disk * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten */ -static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, - struct buffer_head *exbh) +static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, + struct ext4_map_blocks *map) { struct inode *inode = mpd->inode; struct address_space *mapping = inode->i_mapping; - int blocks = exbh->b_size >> inode->i_blkbits; - sector_t pblock = exbh->b_blocknr, cur_logical; + int blocks = map->m_len; + sector_t pblock = map->m_pblk, cur_logical; struct buffer_head *head, *bh; pgoff_t index, end; struct pagevec pvec; int nr_pages, i; - index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + end = (map->m_lblk + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); pagevec_init(&pvec, 0); @@ -2090,17 +2070,16 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, /* skip blocks out of the range */ do { - if (cur_logical >= logical) + if (cur_logical >= map->m_lblk) break; cur_logical++; } while ((bh = bh->b_this_page) != head); do { - if (cur_logical >= logical + blocks) + if (cur_logical >= map->m_lblk + blocks) break; - if (buffer_delay(bh) || - buffer_unwritten(bh)) { + if (buffer_delay(bh) || buffer_unwritten(bh)) { BUG_ON(bh->b_bdev != inode->i_sb->s_bdev); @@ -2119,7 +2098,7 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, } else if (buffer_mapped(bh)) BUG_ON(bh->b_blocknr != pblock); - if (buffer_uninit(exbh)) + if (map->m_flags & EXT4_MAP_UNINIT) set_buffer_uninit(bh); cur_logical++; pblock++; @@ -2130,21 +2109,6 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, } -/* - * __unmap_underlying_blocks - just a helper function to unmap - * set of blocks described by @bh - */ -static inline void __unmap_underlying_blocks(struct inode *inode, - struct buffer_head *bh) -{ - struct block_device *bdev = inode->i_sb->s_bdev; - int blocks, i; - - blocks = bh->b_size >> inode->i_blkbits; - for (i = 0; i < blocks; i++) - unmap_underlying_metadata(bdev, bh->b_blocknr + i); -} - static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, sector_t logical, long blk_cnt) { @@ -2206,7 +2170,7 @@ static void ext4_print_free_blocks(struct inode *inode) static int mpage_da_map_blocks(struct mpage_da_data *mpd) { int err, blks, get_blocks_flags; - struct buffer_head new; + struct ext4_map_blocks map; sector_t next = mpd->b_blocknr; unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; loff_t disksize = EXT4_I(mpd->inode)->i_disksize; @@ 
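
/*
 * Worked example (plain C, not kernel code) of the page-index arithmetic
 * used by mpage_put_bnr_to_bhs() above: with blocks smaller than a page,
 * several logical blocks share one page, so the block range has to be
 * converted to an inclusive page-cache index range first.
 */
#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12, blkbits = 10;     /* 4K pages, 1K blocks */
        unsigned int per_page_shift = page_shift - blkbits; /* 4 blocks/page */
        unsigned int m_lblk = 10, m_len = 8;
        unsigned int first = m_lblk >> per_page_shift;              /* 2 */
        unsigned int last  = (m_lblk + m_len - 1) >> per_page_shift; /* 4 */

        printf("blocks %u..%u live in pages %u..%u\n",
               m_lblk, m_lblk + m_len - 1, first, last);
        return 0;
}
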
-2247,15 +2211,15 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting * variables are updated after the blocks have been allocated. */ - new.b_state = 0; + map.m_lblk = next; + map.m_len = max_blocks; get_blocks_flags = EXT4_GET_BLOCKS_CREATE; if (ext4_should_dioread_nolock(mpd->inode)) get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; if (mpd->b_state & (1 << BH_Delay)) get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; - blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks, - &new, get_blocks_flags); + blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); if (blks < 0) { err = blks; /* @@ -2282,7 +2246,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) ext4_msg(mpd->inode->i_sb, KERN_CRIT, "delayed block allocation failed for inode %lu at " "logical offset %llu with max blocks %zd with " - "error %d\n", mpd->inode->i_ino, + "error %d", mpd->inode->i_ino, (unsigned long long) next, mpd->b_size >> mpd->inode->i_blkbits, err); printk(KERN_CRIT "This should not happen!! " @@ -2297,10 +2261,13 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) } BUG_ON(blks == 0); - new.b_size = (blks << mpd->inode->i_blkbits); + if (map.m_flags & EXT4_MAP_NEW) { + struct block_device *bdev = mpd->inode->i_sb->s_bdev; + int i; - if (buffer_new(&new)) - __unmap_underlying_blocks(mpd->inode, &new); + for (i = 0; i < map.m_len; i++) + unmap_underlying_metadata(bdev, map.m_pblk + i); + } /* * If blocks are delayed marked, we need to @@ -2308,7 +2275,7 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd) */ if ((mpd->b_state & (1 << BH_Delay)) || (mpd->b_state & (1 << BH_Unwritten))) - mpage_put_bnr_to_bhs(mpd, next, &new); + mpage_put_bnr_to_bhs(mpd, &map); if (ext4_should_order_data(mpd->inode)) { err = ext4_jbd2_file_inode(handle, mpd->inode); @@ -2349,8 +2316,17 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t next; int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; + /* + * XXX Don't go larger than mballoc is willing to allocate + * This is a stopgap solution. We eventually need to fold + * mpage_da_submit_io() into this function and then call + * ext4_get_blocks() multiple times in a loop + */ + if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize) + goto flush_it; + /* check if thereserved journal credits might overflow */ - if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) { + if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) { if (nrblocks >= EXT4_MAX_TRANS_DATA) { /* * With non-extent format we are limited by the journal @@ -2423,17 +2399,6 @@ static int __mpage_da_writepage(struct page *page, struct buffer_head *bh, *head; sector_t logical; - if (mpd->io_done) { - /* - * Rest of the page in the page_vec - * redirty then and skip then. We will - * try to write them again after - * starting a new transaction - */ - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return MPAGE_DA_EXTENT_TAIL; - } /* * Can we merge this page to current extent? */ @@ -2528,8 +2493,9 @@ static int __mpage_da_writepage(struct page *page, * initialized properly. 
*/ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) + struct buffer_head *bh, int create) { + struct ext4_map_blocks map; int ret = 0; sector_t invalid_block = ~((sector_t) 0xffff); @@ -2537,16 +2503,22 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, invalid_block = ~0; BUG_ON(create == 0); - BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); + BUG_ON(bh->b_size != inode->i_sb->s_blocksize); + + map.m_lblk = iblock; + map.m_len = 1; /* * first, we need to know whether the block is allocated already * preallocated blocks are unmapped but should treated * the same as allocated blocks. */ - ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0); - if ((ret == 0) && !buffer_delay(bh_result)) { - /* the block isn't (pre)allocated yet, let's reserve space */ + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret < 0) + return ret; + if (ret == 0) { + if (buffer_delay(bh)) + return 0; /* Not sure this could or should happen */ /* * XXX: __block_prepare_write() unmaps passed block, * is it OK? @@ -2556,26 +2528,26 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, /* not enough space to reserve */ return ret; - map_bh(bh_result, inode->i_sb, invalid_block); - set_buffer_new(bh_result); - set_buffer_delay(bh_result); - } else if (ret > 0) { - bh_result->b_size = (ret << inode->i_blkbits); - if (buffer_unwritten(bh_result)) { - /* A delayed write to unwritten bh should - * be marked new and mapped. Mapped ensures - * that we don't do get_block multiple times - * when we write to the same offset and new - * ensures that we do proper zero out for - * partial write. - */ - set_buffer_new(bh_result); - set_buffer_mapped(bh_result); - } - ret = 0; + map_bh(bh, inode->i_sb, invalid_block); + set_buffer_new(bh); + set_buffer_delay(bh); + return 0; } - return ret; + map_bh(bh, inode->i_sb, map.m_pblk); + bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; + + if (buffer_unwritten(bh)) { + /* A delayed write to unwritten bh should be marked + * new and mapped. Mapped ensures that we don't do + * get_block multiple times when we write to the same + * offset and new ensures that we do proper zero out + * for partial write. + */ + set_buffer_new(bh); + set_buffer_mapped(bh); + } + return 0; } /* @@ -2597,21 +2569,8 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, static int noalloc_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - int ret = 0; - unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; - BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); - - /* - * we don't want to do block allocation in writepage - * so call get_block_wrap with create = 0 - */ - ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0); - if (ret > 0) { - bh_result->b_size = (ret << inode->i_blkbits); - ret = 0; - } - return ret; + return _ext4_get_block(inode, iblock, bh_result, 0); } static int bget_one(handle_t *handle, struct buffer_head *bh) @@ -2821,13 +2780,131 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode) * number of contiguous block. 
So we will limit * number of contiguous block to a sane value */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && (max_blocks > EXT4_MAX_TRANS_DATA)) max_blocks = EXT4_MAX_TRANS_DATA; return ext4_chunk_trans_blocks(inode, max_blocks); } +/* + * write_cache_pages_da - walk the list of dirty pages of the given + * address space and call the callback function (which usually writes + * the pages). + * + * This is a forked version of write_cache_pages(). Differences: + * Range cyclic is ignored. + * no_nrwrite_index_update is always presumed true + */ +static int write_cache_pages_da(struct address_space *mapping, + struct writeback_control *wbc, + struct mpage_da_data *mpd) +{ + int ret = 0; + int done = 0; + struct pagevec pvec; + int nr_pages; + pgoff_t index; + pgoff_t end; /* Inclusive */ + long nr_to_write = wbc->nr_to_write; + + pagevec_init(&pvec, 0); + index = wbc->range_start >> PAGE_CACHE_SHIFT; + end = wbc->range_end >> PAGE_CACHE_SHIFT; + + while (!done && (index <= end)) { + int i; + + nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* + * At this point, the page may be truncated or + * invalidated (changing page->mapping to NULL), or + * even swizzled back from swapper_space to tmpfs file + * mapping. However, page->index will not change + * because we have a reference on the page. + */ + if (page->index > end) { + done = 1; + break; + } + + lock_page(page); + + /* + * Page truncated or invalidated. We can freely skip it + * then, even for data integrity operations: the page + * has disappeared concurrently, so there could be no + * real expectation of this data interity operation + * even if there is now a new, dirty page at the same + * pagecache address. + */ + if (unlikely(page->mapping != mapping)) { +continue_unlock: + unlock_page(page); + continue; + } + + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + if (PageWriteback(page)) { + if (wbc->sync_mode != WB_SYNC_NONE) + wait_on_page_writeback(page); + else + goto continue_unlock; + } + + BUG_ON(PageWriteback(page)); + if (!clear_page_dirty_for_io(page)) + goto continue_unlock; + + ret = __mpage_da_writepage(page, wbc, mpd); + if (unlikely(ret)) { + if (ret == AOP_WRITEPAGE_ACTIVATE) { + unlock_page(page); + ret = 0; + } else { + done = 1; + break; + } + } + + if (nr_to_write > 0) { + nr_to_write--; + if (nr_to_write == 0 && + wbc->sync_mode == WB_SYNC_NONE) { + /* + * We stop writing back only if we are + * not doing integrity sync. In case of + * integrity sync we have to keep going + * because someone may be concurrently + * dirtying pages, and we might have + * synced a lot of newly appeared dirty + * pages, but have not synced all of the + * old dirty pages. 
+ */ + done = 1; + break; + } + } + } + pagevec_release(&pvec); + cond_resched(); + } + return ret; +} + + static int ext4_da_writepages(struct address_space *mapping, struct writeback_control *wbc) { @@ -2836,7 +2913,6 @@ static int ext4_da_writepages(struct address_space *mapping, handle_t *handle = NULL; struct mpage_da_data mpd; struct inode *inode = mapping->host; - int no_nrwrite_index_update; int pages_written = 0; long pages_skipped; unsigned int max_pages; @@ -2916,12 +2992,6 @@ static int ext4_da_writepages(struct address_space *mapping, mpd.wbc = wbc; mpd.inode = mapping->host; - /* - * we don't want write_cache_pages to update - * nr_to_write and writeback_index - */ - no_nrwrite_index_update = wbc->no_nrwrite_index_update; - wbc->no_nrwrite_index_update = 1; pages_skipped = wbc->pages_skipped; retry: @@ -2941,7 +3011,7 @@ retry: if (IS_ERR(handle)) { ret = PTR_ERR(handle); ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " - "%ld pages, ino %lu; err %d\n", __func__, + "%ld pages, ino %lu; err %d", __func__, wbc->nr_to_write, inode->i_ino, ret); goto out_writepages; } @@ -2963,8 +3033,7 @@ retry: mpd.io_done = 0; mpd.pages_written = 0; mpd.retval = 0; - ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, - &mpd); + ret = write_cache_pages_da(mapping, wbc, &mpd); /* * If we have a contiguous extent of pages and we * haven't done the I/O yet, map the blocks and submit @@ -3016,7 +3085,7 @@ retry: if (pages_skipped != wbc->pages_skipped) ext4_msg(inode->i_sb, KERN_CRIT, "This should not happen leaving %s " - "with nr_to_write = %ld ret = %d\n", + "with nr_to_write = %ld ret = %d", __func__, wbc->nr_to_write, ret); /* Update index */ @@ -3030,8 +3099,6 @@ retry: mapping->writeback_index = index; out_writepages: - if (!no_nrwrite_index_update) - wbc->no_nrwrite_index_update = 0; wbc->nr_to_write -= nr_to_writebump; wbc->range_start = range_start; trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); @@ -3076,7 +3143,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - int ret, retries = 0, quota_retries = 0; + int ret, retries = 0; struct page *page; pgoff_t index; unsigned from, to; @@ -3135,22 +3202,6 @@ retry: if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; - - if ((ret == -EDQUOT) && - EXT4_I(inode)->i_reserved_meta_blocks && - (quota_retries++ < 3)) { - /* - * Since we often over-estimate the number of meta - * data blocks required, we may sometimes get a - * spurios out of quota error even though there would - * be enough space once we write the data blocks and - * find out how many meta data blocks were _really_ - * required. So try forcing the inode write to see if - * that helps. - */ - write_inode_now(inode, (quota_retries == 3)); - goto retry; - } out: return ret; } @@ -3546,46 +3597,18 @@ out: return ret; } +/* + * ext4_get_block used when preparing for a DIO write or buffer write. + * We allocate an uinitialized extent if blocks haven't been allocated. + * The extent will be converted to initialized after the IO is complete. 
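
/*
 * Minimal state sketch (not kernel code) of the write path described in the
 * comment above ext4_get_block_write(): blocks are handed out as an
 * unwritten (uninitialized) extent at submission time, the data is written,
 * and only the I/O completion path converts the extent to written, so a
 * reader never sees stale block contents.
 */
#include <stdio.h>

enum toy_extent_state { TOY_HOLE, TOY_UNWRITTEN, TOY_WRITTEN };

static enum toy_extent_state get_block_for_write(enum toy_extent_state s)
{
        /* allocate, but keep the extent marked unwritten for now */
        return (s == TOY_HOLE) ? TOY_UNWRITTEN : s;
}

static enum toy_extent_state io_complete(enum toy_extent_state s)
{
        /* end_io work: convert the filled extent to written */
        return (s == TOY_UNWRITTEN) ? TOY_WRITTEN : s;
}

int main(void)
{
        enum toy_extent_state s = TOY_HOLE;

        s = get_block_for_write(s);     /* submit: unwritten extent allocated */
        s = io_complete(s);             /* completion: now safe to expose data */
        printf("final state: %d (2 == written)\n", s);
        return 0;
}
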
+ */ static int ext4_get_block_write(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - handle_t *handle = ext4_journal_current_handle(); - int ret = 0; - unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; - int dio_credits; - int started = 0; - ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", inode->i_ino, create); - /* - * ext4_get_block in prepare for a DIO write or buffer write. - * We allocate an uinitialized extent if blocks haven't been allocated. - * The extent will be converted to initialized after IO complete. - */ - create = EXT4_GET_BLOCKS_IO_CREATE_EXT; - - if (!handle) { - if (max_blocks > DIO_MAX_BLOCKS) - max_blocks = DIO_MAX_BLOCKS; - dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); - handle = ext4_journal_start(inode, dio_credits); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - goto out; - } - started = 1; - } - - ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result, - create); - if (ret > 0) { - bh_result->b_size = (ret << inode->i_blkbits); - ret = 0; - } - if (started) - ext4_journal_stop(handle); -out: - return ret; + return _ext4_get_block(inode, iblock, bh_result, + EXT4_GET_BLOCKS_IO_CREATE_EXT); } static void dump_completed_IO(struct inode * inode) @@ -3973,7 +3996,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); @@ -4302,10 +4325,9 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, count)) { - ext4_error(inode->i_sb, "inode #%lu: " - "attempt to clear blocks %llu len %lu, invalid", - inode->i_ino, (unsigned long long) block_to_free, - count); + EXT4_ERROR_INODE(inode, "attempt to clear invalid " + "blocks %llu len %lu", + (unsigned long long) block_to_free, count); return 1; } @@ -4410,11 +4432,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode, if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) ext4_handle_dirty_metadata(handle, inode, this_bh); else - ext4_error(inode->i_sb, - "circular indirect block detected, " - "inode=%lu, block=%llu", - inode->i_ino, - (unsigned long long) this_bh->b_blocknr); + EXT4_ERROR_INODE(inode, + "circular indirect block detected at " + "block %llu", + (unsigned long long) this_bh->b_blocknr); } } @@ -4452,11 +4473,10 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), nr, 1)) { - ext4_error(inode->i_sb, - "indirect mapped block in inode " - "#%lu invalid (level %d, blk #%lu)", - inode->i_ino, depth, - (unsigned long) nr); + EXT4_ERROR_INODE(inode, + "invalid indirect mapped " + "block %lu (level %d)", + (unsigned long) nr, depth); break; } @@ -4468,9 +4488,9 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, * (should be rare). 
*/ if (!bh) { - ext4_error(inode->i_sb, - "Read failure, inode=%lu, block=%llu", - inode->i_ino, nr); + EXT4_ERROR_INODE(inode, + "Read failure block=%llu", + (unsigned long long) nr); continue; } @@ -4612,12 +4632,12 @@ void ext4_truncate(struct inode *inode) if (!ext4_can_truncate(inode)) return; - EXT4_I(inode)->i_flags &= ~EXT4_EOFBLOCKS_FL; + ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); - if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { + if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { ext4_ext_truncate(inode); return; } @@ -4785,8 +4805,8 @@ static int __ext4_get_inode_loc(struct inode *inode, bh = sb_getblk(sb, block); if (!bh) { - ext4_error(sb, "unable to read inode block - " - "inode=%lu, block=%llu", inode->i_ino, block); + EXT4_ERROR_INODE(inode, "unable to read inode block - " + "block %llu", block); return -EIO; } if (!buffer_uptodate(bh)) { @@ -4884,8 +4904,8 @@ make_io: submit_bh(READ_META, bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { - ext4_error(sb, "unable to read inode block - inode=%lu," - " block=%llu", inode->i_ino, block); + EXT4_ERROR_INODE(inode, "unable to read inode " + "block %llu", block); brelse(bh); return -EIO; } @@ -5096,8 +5116,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) ret = 0; if (ei->i_file_acl && !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { - ext4_error(sb, "bad extended attribute block %llu inode #%lu", - ei->i_file_acl, inode->i_ino); + EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", + ei->i_file_acl); ret = -EIO; goto bad_inode; } else if (ei->i_flags & EXT4_EXTENTS_FL) { @@ -5142,8 +5162,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); } else { ret = -EIO; - ext4_error(inode->i_sb, "bogus i_mode (%o) for inode=%lu", - inode->i_mode, inode->i_ino); + EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); goto bad_inode; } brelse(iloc.bh); @@ -5381,9 +5400,9 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) if (wbc->sync_mode == WB_SYNC_ALL) sync_dirty_buffer(iloc.bh); if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { - ext4_error(inode->i_sb, "IO error syncing inode, " - "inode=%lu, block=%llu", inode->i_ino, - (unsigned long long)iloc.bh->b_blocknr); + EXT4_ERROR_INODE(inode, + "IO error syncing inode (block=%llu)", + (unsigned long long) iloc.bh->b_blocknr); err = -EIO; } brelse(iloc.bh); @@ -5455,7 +5474,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) } if (attr->ia_valid & ATTR_SIZE) { - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (attr->ia_size > sbi->s_bitmap_maxbytes) { @@ -5468,7 +5487,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE && (attr->ia_size < inode->i_size || - (EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL))) { + (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) { handle_t *handle; handle = ext4_journal_start(inode, 3); @@ -5500,7 +5519,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) } } /* ext4_truncate will clear the flag */ - if ((EXT4_I(inode)->i_flags & EXT4_EOFBLOCKS_FL)) + if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) ext4_truncate(inode); } @@ -5576,7 +5595,7 @@ static int 
ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) { - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return ext4_indirect_trans_blocks(inode, nrblocks, chunk); return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); } @@ -5911,9 +5930,9 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val) */ if (val) - EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; + ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); else - EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; + ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); ext4_set_aops(inode); jbd2_journal_unlock_updates(journal); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 016d0249294f..bf5ae883b1bd 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -258,7 +258,7 @@ setversion_out: if (me.moved_len > 0) file_remove_suid(donor_filp); - if (copy_to_user((struct move_extent __user *)arg, + if (copy_to_user((struct move_extent __user *)arg, &me, sizeof(me))) err = -EFAULT; mext_out: @@ -373,7 +373,30 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC32_SETRSVSZ: cmd = EXT4_IOC_SETRSVSZ; break; - case EXT4_IOC_GROUP_ADD: + case EXT4_IOC32_GROUP_ADD: { + struct compat_ext4_new_group_input __user *uinput; + struct ext4_new_group_input input; + mm_segment_t old_fs; + int err; + + uinput = compat_ptr(arg); + err = get_user(input.group, &uinput->group); + err |= get_user(input.block_bitmap, &uinput->block_bitmap); + err |= get_user(input.inode_bitmap, &uinput->inode_bitmap); + err |= get_user(input.inode_table, &uinput->inode_table); + err |= get_user(input.blocks_count, &uinput->blocks_count); + err |= get_user(input.reserved_blocks, + &uinput->reserved_blocks); + if (err) + return -EFAULT; + old_fs = get_fs(); + set_fs(KERNEL_DS); + err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD, + (unsigned long) &input); + set_fs(old_fs); + return err; + } + case EXT4_IOC_MOVE_EXT: break; default: return -ENOIOCTLCMD; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index b423a364dca3..12b3bc026a68 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -658,6 +658,27 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, } } +/* + * Cache the order of the largest free extent we have available in this block + * group. + */ +static void +mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) +{ + int i; + int bits; + + grp->bb_largest_free_order = -1; /* uninit */ + + bits = sb->s_blocksize_bits + 1; + for (i = bits; i >= 0; i--) { + if (grp->bb_counters[i] > 0) { + grp->bb_largest_free_order = i; + break; + } + } +} + static noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) @@ -700,6 +721,7 @@ void ext4_mb_generate_buddy(struct super_block *sb, */ grp->bb_free = free; } + mb_set_largest_free_order(sb, grp); clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); @@ -725,6 +747,9 @@ void ext4_mb_generate_buddy(struct super_block *sb, * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 + * + * Locking note: This routine takes the block group lock of all groups + * for this page; do not hold this lock when calling this routine! 
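The EXT4_IOC32_GROUP_ADD handler above copies the argument field by field with get_user() instead of casting the user pointer, because the i386 and native layouts of the structure typically differ: 64-bit members are only 4-byte aligned in the 32-bit ABI, so sizes and offsets do not match. A small user-space sketch of that translation step, with illustrative struct definitions rather than the real ext4 ones:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins, not ext4_new_group_input */
struct compat_group_input {
        uint32_t group;
        uint64_t block_bitmap;
        uint32_t blocks_count;
} __attribute__((packed, aligned(4)));  /* i386-style 4-byte u64 alignment */

struct group_input {                    /* native layout */
        uint32_t group;
        uint64_t block_bitmap;
        uint32_t blocks_count;
};

static void translate(const struct compat_group_input *in,
                      struct group_input *out)
{
        /* one assignment per member, so padding never leaks through */
        out->group = in->group;
        out->block_bitmap = in->block_bitmap;
        out->blocks_count = in->blocks_count;
}

int main(void)
{
        struct compat_group_input c = { 5, 12345, 32768 };
        struct group_input n;

        translate(&c, &n);
        printf("compat size %zu, native size %zu, group %u\n",
               sizeof(c), sizeof(n), (unsigned int) n.group);
        return 0;
}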
*/ static int ext4_mb_init_cache(struct page *page, char *incore) @@ -865,6 +890,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) BUG_ON(incore == NULL); mb_debug(1, "put buddy for group %u in page %lu/%x\n", group, page->index, i * blocksize); + trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, @@ -882,6 +908,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) BUG_ON(incore != NULL); mb_debug(1, "put bitmap for group %u in page %lu/%x\n", group, page->index, i * blocksize); + trace_ext4_mb_bitmap_load(sb, group); /* see comments in ext4_mb_put_pa() */ ext4_lock_group(sb, group); @@ -910,6 +937,11 @@ out: return err; } +/* + * Locking note: This routine calls ext4_mb_init_cache(), which takes the + * block group lock of all groups for this page; do not hold the BG lock when + * calling this routine! + */ static noinline_for_stack int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) { @@ -1004,6 +1036,11 @@ err: return ret; } +/* + * Locking note: This routine calls ext4_mb_init_cache(), which takes the + * block group lock of all groups for this page; do not hold the BG lock when + * calling this routine! + */ static noinline_for_stack int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) @@ -1150,7 +1187,7 @@ err: return ret; } -static void ext4_mb_release_desc(struct ext4_buddy *e4b) +static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) page_cache_release(e4b->bd_bitmap_page); @@ -1299,6 +1336,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, buddy = buddy2; } while (1); } + mb_set_largest_free_order(sb, e4b->bd_info); mb_check_buddy(e4b); } @@ -1427,6 +1465,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) e4b->bd_info->bb_counters[ord]++; e4b->bd_info->bb_counters[ord]++; } + mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0); mb_check_buddy(e4b); @@ -1617,7 +1656,7 @@ int ext4_mb_try_best_found(struct ext4_allocation_context *ac, } ext4_unlock_group(ac->ac_sb, group); - ext4_mb_release_desc(e4b); + ext4_mb_unload_buddy(e4b); return 0; } @@ -1672,7 +1711,7 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, ext4_mb_use_best_found(ac, e4b); } ext4_unlock_group(ac->ac_sb, group); - ext4_mb_release_desc(e4b); + ext4_mb_unload_buddy(e4b); return 0; } @@ -1821,16 +1860,22 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, } } +/* This is now called BEFORE we load the buddy bitmap. 
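mb_set_largest_free_order() above caches, per block group, the order of the largest free buddy extent, so a group can later be accepted or rejected with a single comparison instead of a walk over bb_counters[]; the cr == 0 case of ext4_mb_good_group() below relies on exactly that comparison. A self-contained sketch of the bookkeeping, where the types and MAX_ORDER are stand-ins rather than the ext4 definitions:

#include <stdio.h>

#define MAX_ORDER 14                    /* stand-in for s_blocksize_bits + 1 */

struct group_info {
        unsigned int counters[MAX_ORDER + 1];   /* free extents of size 2^i */
        int largest_free_order;                 /* -1: nothing free / uninit */
};

static void set_largest_free_order(struct group_info *grp)
{
        int i;

        grp->largest_free_order = -1;
        for (i = MAX_ORDER; i >= 0; i--) {
                if (grp->counters[i] > 0) {
                        grp->largest_free_order = i;
                        break;
                }
        }
}

/* cheap pre-check: can this group satisfy a request of the given order? */
static int group_good_for_order(const struct group_info *grp, int order)
{
        return grp->largest_free_order >= order;
}

int main(void)
{
        struct group_info grp = { .counters = { [3] = 2, [7] = 1 } };

        set_largest_free_order(&grp);
        printf("largest order %d, fits order 5: %d, fits order 9: %d\n",
               grp.largest_free_order,
               group_good_for_order(&grp, 5),
               group_good_for_order(&grp, 9));
        return 0;
}

The cached value has to be refreshed wherever the buddy changes, which is why the calls above appear in ext4_mb_generate_buddy(), mb_free_blocks() and mb_mark_used().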
*/ static int ext4_mb_good_group(struct ext4_allocation_context *ac, ext4_group_t group, int cr) { unsigned free, fragments; - unsigned i, bits; int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); BUG_ON(cr < 0 || cr >= 4); - BUG_ON(EXT4_MB_GRP_NEED_INIT(grp)); + + /* We only do this if the grp has never been initialized */ + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { + int ret = ext4_mb_init_group(ac->ac_sb, group); + if (ret) + return 0; + } free = grp->bb_free; fragments = grp->bb_fragments; @@ -1843,17 +1888,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac, case 0: BUG_ON(ac->ac_2order == 0); + if (grp->bb_largest_free_order < ac->ac_2order) + return 0; + /* Avoid using the first bg of a flexgroup for data files */ if ((ac->ac_flags & EXT4_MB_HINT_DATA) && (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && ((group % flex_size) == 0)) return 0; - bits = ac->ac_sb->s_blocksize_bits + 1; - for (i = ac->ac_2order; i <= bits; i++) - if (grp->bb_counters[i] > 0) - return 1; - break; + return 1; case 1: if ((free / fragments) >= ac->ac_g_ex.fe_len) return 1; @@ -1964,7 +2008,7 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) sbi = EXT4_SB(sb); ngroups = ext4_get_groups_count(sb); /* non-extent files are limited to low blocks/groups */ - if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) ngroups = sbi->s_blockfile_groups; BUG_ON(ac->ac_status == AC_STATUS_FOUND); @@ -2024,15 +2068,11 @@ repeat: group = ac->ac_g_ex.fe_group; for (i = 0; i < ngroups; group++, i++) { - struct ext4_group_info *grp; - struct ext4_group_desc *desc; - if (group == ngroups) group = 0; - /* quick check to skip empty groups */ - grp = ext4_get_group_info(sb, group); - if (grp->bb_free == 0) + /* This now checks without needing the buddy page */ + if (!ext4_mb_good_group(ac, group, cr)) continue; err = ext4_mb_load_buddy(sb, group, &e4b); @@ -2040,15 +2080,18 @@ repeat: goto out; ext4_lock_group(sb, group); + + /* + * We need to check again after locking the + * block group + */ if (!ext4_mb_good_group(ac, group, cr)) { - /* someone did allocation from this group */ ext4_unlock_group(sb, group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); continue; } ac->ac_groups_scanned++; - desc = ext4_get_group_desc(sb, group, NULL); if (cr == 0) ext4_mb_simple_scan_group(ac, &e4b); else if (cr == 1 && @@ -2058,7 +2101,7 @@ repeat: ext4_mb_complex_scan_group(ac, &e4b); ext4_unlock_group(sb, group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); if (ac->ac_status != AC_STATUS_CONTINUE) break; @@ -2148,7 +2191,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) ext4_lock_group(sb, group); memcpy(&sg, ext4_get_group_info(sb, group), i); ext4_unlock_group(sb, group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, sg.info.bb_fragments, sg.info.bb_first_free); @@ -2255,6 +2298,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); init_rwsem(&meta_group_info[i]->alloc_sem); meta_group_info[i]->bb_free_root = RB_ROOT; + meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ #ifdef DOUBLE_CHECK { @@ -2536,6 +2580,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) entry->count, entry->group, entry); if (test_opt(sb, DISCARD)) { + 
int ret; ext4_fsblk_t discard_block; discard_block = entry->start_blk + @@ -2543,7 +2588,12 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) trace_ext4_discard_blocks(sb, (unsigned long long)discard_block, entry->count); - sb_issue_discard(sb, discard_block, entry->count); + ret = sb_issue_discard(sb, discard_block, entry->count); + if (ret == EOPNOTSUPP) { + ext4_warning(sb, + "discard not supported, disabling"); + clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); + } } err = ext4_mb_load_buddy(sb, entry->group, &e4b); @@ -2568,7 +2618,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) } ext4_unlock_group(sb, entry->group); kmem_cache_free(ext4_free_ext_cachep, entry); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); } mb_debug(1, "freed %u blocks in %u structures\n", count, count2); @@ -2641,7 +2691,7 @@ int __init init_ext4_mballoc(void) void exit_ext4_mballoc(void) { - /* + /* * Wait for completion of call_rcu()'s on ext4_pspace_cachep * before destroying the slab cache. */ @@ -2981,7 +3031,7 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) { atomic_inc(&sbi->s_bal_reqs); atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); - if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len) + if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) atomic_inc(&sbi->s_bal_success); atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && @@ -3123,7 +3173,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) continue; /* non-extent files can't have physical blocks past 2^32 */ - if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) && + if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS) continue; @@ -3280,7 +3330,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; - /* + /* * If doing group-based preallocation, pa_pstart may be in the * next group when pa is used up */ @@ -3697,7 +3747,7 @@ out: ext4_unlock_group(sb, group); if (ac) kmem_cache_free(ext4_ac_cachep, ac); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); return free; } @@ -3801,7 +3851,7 @@ repeat: if (bitmap_bh == NULL) { ext4_error(sb, "Error reading block bitmap for %u", group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); continue; } @@ -3810,7 +3860,7 @@ repeat: ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac); ext4_unlock_group(sb, group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); list_del(&pa->u.pa_tmp_list); @@ -4074,7 +4124,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, ext4_mb_release_group_pa(&e4b, pa, ac); ext4_unlock_group(sb, group); - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } @@ -4484,12 +4534,12 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, if (!bh) tbh = sb_find_get_block(inode->i_sb, block + i); - ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, + ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, tbh, block + i); } } - /* + /* * We need to make sure we don't reuse the freed block until * after the transaction is committed, which we can do by * treating the block as metadata, below. 
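The discard handling above degrades gracefully: when sb_issue_discard() reports EOPNOTSUPP, the warning is printed once and the DISCARD mount option is cleared, so later commits do not keep retrying an operation the device cannot perform. A user-space sketch of that disable-on-EOPNOTSUPP pattern, with issue_discard() as a hypothetical stand-in for the block-layer call:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool discard_enabled = true;

static int issue_discard(unsigned long long block, unsigned long count)
{
        (void) block;
        (void) count;
        return EOPNOTSUPP;      /* pretend the device has no discard support */
}

static void free_blocks(unsigned long long block, unsigned long count)
{
        if (discard_enabled) {
                int ret = issue_discard(block, count);

                if (ret == EOPNOTSUPP) {
                        fprintf(stderr, "discard not supported, disabling\n");
                        discard_enabled = false;
                }
        }
        /* ... hand the blocks back to the free-space accounting ... */
}

int main(void)
{
        free_blocks(1024, 8);
        free_blocks(2048, 8);   /* no second attempt and no second warning */
        return 0;
}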
We make an @@ -4610,7 +4660,7 @@ do_more: atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks); } - ext4_mb_release_desc(&e4b); + ext4_mb_unload_buddy(&e4b); freed += count; diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 34dcfc52ef44..6f3a27ec30bf 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode) */ if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_INCOMPAT_EXTENTS) || - (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EINVAL; if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index d1fc662cc311..3a6c92ac131c 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -482,6 +482,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, int depth = ext_depth(orig_inode); int ret; + start_ext.ee_block = end_ext.ee_block = 0; o_start = o_end = oext = orig_path[depth].p_ext; oext_alen = ext4_ext_get_actual_len(oext); start_ext.ee_len = end_ext.ee_len = 0; @@ -529,7 +530,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, * new_ext |-------| */ if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { - ext4_error(orig_inode->i_sb, + EXT4_ERROR_INODE(orig_inode, "new_ext_end(%u) should be less than or equal to " "oext->ee_block(%u) + oext_alen(%d) - 1", new_ext_end, le32_to_cpu(oext->ee_block), @@ -692,12 +693,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, while (1) { /* The extent for donor must be found. */ if (!dext) { - ext4_error(donor_inode->i_sb, + EXT4_ERROR_INODE(donor_inode, "The extent for donor must be found"); *err = -EIO; goto out; } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { - ext4_error(donor_inode->i_sb, + EXT4_ERROR_INODE(donor_inode, "Donor offset(%u) and the first block of donor " "extent(%u) should be equal", donor_off, @@ -976,11 +977,11 @@ mext_check_arguments(struct inode *orig_inode, } /* Ext4 move extent supports only extent based file */ - if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) { + if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: orig file is not extents " "based file [ino:orig %lu]\n", orig_inode->i_ino); return -EOPNOTSUPP; - } else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) { + } else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: donor file is not extents " "based file [ino:donor %lu]\n", donor_inode->i_ino); return -EOPNOTSUPP; @@ -1354,7 +1355,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, if (ret1 < 0) break; if (*moved_len > len) { - ext4_error(orig_inode->i_sb, + EXT4_ERROR_INODE(orig_inode, "We replaced blocks too much! 
" "sum of replaced: %llu requested: %llu", *moved_len, len); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 0c070fabd108..a43e6617b351 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -187,7 +187,7 @@ unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize) return blocksize; return (len & 65532) | ((len & 3) << 16); } - + __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) { if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) @@ -197,7 +197,7 @@ __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) if (len == blocksize) { if (blocksize == 65536) return cpu_to_le16(EXT4_MAX_REC_LEN); - else + else return cpu_to_le16(0); } return cpu_to_le16((len & 65532) | ((len >> 16) & 3)); @@ -349,7 +349,7 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, brelse(bh); } if (bcount) - printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", + printk(KERN_DEBUG "%snames %u, fullness %u (%u%%)\n", levels ? "" : " ", names, space/bcount, (space/bcount)*100/blocksize); return (struct stats) { names, space, bcount}; @@ -653,10 +653,10 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, int ret, err; __u32 hashval; - dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", + dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n", start_hash, start_minor_hash)); dir = dir_file->f_path.dentry->d_inode; - if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) { + if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) { hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version; if (hinfo.hash_version <= DX_HASH_TEA) hinfo.hash_version += @@ -801,7 +801,7 @@ static void ext4_update_dx_flag(struct inode *inode) { if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) - EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL; + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); } /* @@ -943,8 +943,8 @@ restart: wait_on_buffer(bh); if (!buffer_uptodate(bh)) { /* read error, skip block & hope for the best */ - ext4_error(sb, "reading directory #%lu offset %lu", - dir->i_ino, (unsigned long)block); + EXT4_ERROR_INODE(dir, "reading directory lblock %lu", + (unsigned long) block); brelse(bh); goto next; } @@ -1066,15 +1066,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru __u32 ino = le32_to_cpu(de->inode); brelse(bh); if (!ext4_valid_inum(dir->i_sb, ino)) { - ext4_error(dir->i_sb, "bad inode number: %u", ino); + EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); return ERR_PTR(-EIO); } inode = ext4_iget(dir->i_sb, ino); if (unlikely(IS_ERR(inode))) { if (PTR_ERR(inode) == -ESTALE) { - ext4_error(dir->i_sb, - "deleted inode referenced: %u", - ino); + EXT4_ERROR_INODE(dir, + "deleted inode referenced: %u", + ino); return ERR_PTR(-EIO); } else { return ERR_CAST(inode); @@ -1104,8 +1104,8 @@ struct dentry *ext4_get_parent(struct dentry *child) brelse(bh); if (!ext4_valid_inum(child->d_inode->i_sb, ino)) { - ext4_error(child->d_inode->i_sb, - "bad inode number: %u", ino); + EXT4_ERROR_INODE(child->d_inode, + "bad parent inode number: %u", ino); return ERR_PTR(-EIO); } @@ -1141,7 +1141,7 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, unsigned rec_len = 0; while (count--) { - struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) + struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + (map->offs<<2)); rec_len = EXT4_DIR_REC_LEN(de->name_len); memcpy (to, de, rec_len); @@ -1404,9 +1404,7 @@ static int make_indexed_dir(handle_t *handle, 
struct dentry *dentry, de = (struct ext4_dir_entry_2 *)((char *)fde + ext4_rec_len_from_disk(fde->rec_len, blocksize)); if ((char *) de >= (((char *) root) + blocksize)) { - ext4_error(dir->i_sb, - "invalid rec_len for '..' in inode %lu", - dir->i_ino); + EXT4_ERROR_INODE(dir, "invalid rec_len for '..'"); brelse(bh); return -EIO; } @@ -1418,7 +1416,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, brelse(bh); return retval; } - EXT4_I(dir)->i_flags |= EXT4_INDEX_FL; + ext4_set_inode_flag(dir, EXT4_INODE_INDEX); data1 = bh2->b_data; memcpy (data1, de, len); @@ -1491,7 +1489,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, retval = ext4_dx_add_entry(handle, dentry, inode); if (!retval || (retval != ERR_BAD_DX_DIR)) return retval; - EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL; + ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); dx_fallback++; ext4_mark_inode_dirty(handle, dir); } @@ -1519,6 +1517,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); retval = add_dirent_to_buf(handle, dentry, inode, de, bh); brelse(bh); + if (retval == 0) + ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY); return retval; } @@ -1915,9 +1915,8 @@ static int empty_dir(struct inode *inode) if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) || !(bh = ext4_bread(NULL, inode, 0, 0, &err))) { if (err) - ext4_error(inode->i_sb, - "error %d reading directory #%lu offset 0", - err, inode->i_ino); + EXT4_ERROR_INODE(inode, + "error %d reading directory lblock 0", err); else ext4_warning(inode->i_sb, "bad directory (dir #%lu) - no data block", @@ -1941,17 +1940,17 @@ static int empty_dir(struct inode *inode) de = ext4_next_entry(de1, sb->s_blocksize); while (offset < inode->i_size) { if (!bh || - (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { + (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) { + unsigned int lblock; err = 0; brelse(bh); - bh = ext4_bread(NULL, inode, - offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err); + lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb); + bh = ext4_bread(NULL, inode, lblock, 0, &err); if (!bh) { if (err) - ext4_error(sb, - "error %d reading directory" - " #%lu offset %u", - err, inode->i_ino, offset); + EXT4_ERROR_INODE(inode, + "error %d reading directory " + "lblock %u", err, lblock); offset += sb->s_blocksize; continue; } @@ -2297,7 +2296,7 @@ retry: } } else { /* clear the extent format for fast symlink */ - EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL; + ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); inode->i_op = &ext4_fast_symlink_inode_operations; memcpy((char *)&EXT4_I(inode)->i_data, symname, l); inode->i_size = l-1; diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 5692c48754a0..6df797eb9aeb 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) percpu_counter_add(&sbi->s_freeinodes_counter, EXT4_INODES_PER_GROUP(sb)); - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) && + sbi->s_log_groups_per_flex) { ext4_group_t flex_group; flex_group = ext4_flex_group(sbi, input->group); atomic_add(input->free_blocks_count, diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e14d22c170d5..4e8983a9811b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -241,6 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) if (sb->s_flags & MS_RDONLY) return 
ERR_PTR(-EROFS); + vfs_check_frozen(sb, SB_FREEZE_WRITE); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ @@ -645,6 +646,8 @@ static void ext4_put_super(struct super_block *sb) struct ext4_super_block *es = sbi->s_es; int i, err; + dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + flush_workqueue(sbi->dio_unwritten_wq); destroy_workqueue(sbi->dio_unwritten_wq); @@ -941,6 +944,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0"); if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) seq_puts(seq, ",journal_async_commit"); + else if (test_opt(sb, JOURNAL_CHECKSUM)) + seq_puts(seq, ",journal_checksum"); if (test_opt(sb, NOBH)) seq_puts(seq, ",nobh"); if (test_opt(sb, I_VERSION)) @@ -1059,7 +1064,7 @@ static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount); + char *path); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); @@ -1081,12 +1086,12 @@ static const struct dquot_operations ext4_quota_operations = { static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk + .quota_off = dquot_quota_off, + .quota_sync = dquot_quota_sync, + .get_info = dquot_get_dqinfo, + .set_info = dquot_set_dqinfo, + .get_dqblk = dquot_get_dqblk, + .set_dqblk = dquot_set_dqblk }; #endif @@ -2051,7 +2056,7 @@ static void ext4_orphan_cleanup(struct super_block *sb, /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { if (sb_dqopt(sb)->files[i]) - vfs_quota_off(sb, i, 0); + dquot_quota_off(sb, i); } #endif sb->s_flags = s_flags; /* Restore MS_RDONLY status */ @@ -2213,7 +2218,7 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) struct ext4_attr { struct attribute attr; ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *); - ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, + ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *, const char *, size_t); int offset; }; @@ -2430,6 +2435,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) __releases(kernel_lock) __acquires(kernel_lock) { + char *orig_data = kstrdup(data, GFP_KERNEL); struct buffer_head *bh; struct ext4_super_block *es = NULL; struct ext4_sb_info *sbi; @@ -2793,24 +2799,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); - err = percpu_counter_init(&sbi->s_freeblocks_counter, - ext4_count_free_blocks(sb)); - if (!err) { - err = percpu_counter_init(&sbi->s_freeinodes_counter, - ext4_count_free_inodes(sb)); - } - if (!err) { - err = percpu_counter_init(&sbi->s_dirs_counter, - ext4_count_dirs(sb)); - } - if (!err) { - err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); - } - if (err) { - ext4_msg(sb, KERN_ERR, "insufficient memory"); - goto failed_mount3; - } - sbi->s_stripe = ext4_get_stripe_size(sbi); sbi->s_max_writeback_mb_bump = 
128; @@ -2910,6 +2898,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); no_journal: + err = percpu_counter_init(&sbi->s_freeblocks_counter, + ext4_count_free_blocks(sb)); + if (!err) + err = percpu_counter_init(&sbi->s_freeinodes_counter, + ext4_count_free_inodes(sb)); + if (!err) + err = percpu_counter_init(&sbi->s_dirs_counter, + ext4_count_dirs(sb)); + if (!err) + err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); + if (err) { + ext4_msg(sb, KERN_ERR, "insufficient memory"); + goto failed_mount_wq; + } if (test_opt(sb, NOBH)) { if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) { ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - " @@ -3001,7 +3003,7 @@ no_journal: err = ext4_setup_system_zone(sb); if (err) { ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)\n", err); + "zone (%d)", err); goto failed_mount4; } @@ -3040,9 +3042,11 @@ no_journal: } else descr = "out journal"; - ext4_msg(sb, KERN_INFO, "mounted filesystem with%s", descr); + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " + "Opts: %s", descr, orig_data); lock_kernel(); + kfree(orig_data); return 0; cantfind_ext4: @@ -3059,6 +3063,10 @@ failed_mount_wq: jbd2_journal_destroy(sbi->s_journal); sbi->s_journal = NULL; } + percpu_counter_destroy(&sbi->s_freeblocks_counter); + percpu_counter_destroy(&sbi->s_freeinodes_counter); + percpu_counter_destroy(&sbi->s_dirs_counter); + percpu_counter_destroy(&sbi->s_dirtyblocks_counter); failed_mount3: if (sbi->s_flex_groups) { if (is_vmalloc_addr(sbi->s_flex_groups)) @@ -3066,10 +3074,6 @@ failed_mount3: else kfree(sbi->s_flex_groups); } - percpu_counter_destroy(&sbi->s_freeblocks_counter); - percpu_counter_destroy(&sbi->s_freeinodes_counter); - percpu_counter_destroy(&sbi->s_dirs_counter); - percpu_counter_destroy(&sbi->s_dirtyblocks_counter); failed_mount2: for (i = 0; i < db_count; i++) brelse(sbi->s_group_desc[i]); @@ -3089,6 +3093,7 @@ out_fail: kfree(sbi->s_blockgroup_lock); kfree(sbi); lock_kernel(); + kfree(orig_data); return ret; } @@ -3380,7 +3385,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); es->s_kbytes_written = - cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + + cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - EXT4_SB(sb)->s_sectors_written_start) >> 1)); ext4_free_blocks_count_set(es, percpu_counter_sum_positive( @@ -3485,8 +3490,10 @@ int ext4_force_commit(struct super_block *sb) return 0; journal = EXT4_SB(sb)->s_journal; - if (journal) + if (journal) { + vfs_check_frozen(sb, SB_FREEZE_WRITE); ret = ext4_journal_force_commit(journal); + } return ret; } @@ -3535,18 +3542,16 @@ static int ext4_freeze(struct super_block *sb) * the journal. */ error = jbd2_journal_flush(journal); - if (error < 0) { - out: - jbd2_journal_unlock_updates(journal); - return error; - } + if (error < 0) + goto out; /* Journal blocked and flushed, clear needs_recovery flag. 
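Because the percpu counters are now initialised after the journal is set up, their destruction also moves from the failed_mount3 unwind step to failed_mount_wq, keeping the rule that an error path tears down only what was actually initialised before the failure. A compact sketch of that goto-based staged unwind; init_a(), init_b() and destroy_a() are hypothetical:

#include <stdio.h>

static int init_a(void)  { return 0; }
static int init_b(void)  { return -1; }         /* pretend this step fails */
static void destroy_a(void) { puts("destroy_a"); }

static int fill_super(void)
{
        int err;

        err = init_a();
        if (err)
                goto fail;              /* nothing to unwind yet */

        err = init_b();
        if (err)
                goto fail_a;            /* unwind only what succeeded */

        return 0;

fail_a:
        destroy_a();
fail:
        return err;
}

int main(void)
{
        printf("fill_super() = %d\n", fill_super());
        return 0;
}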
*/ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); - if (error) - goto out; - return 0; +out: + /* we rely on s_frozen to stop further updates */ + jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); + return error; } /* @@ -3563,7 +3568,6 @@ static int ext4_unfreeze(struct super_block *sb) EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ext4_commit_super(sb, 1); unlock_super(sb); - jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return 0; } @@ -3574,12 +3578,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext4_mount_options old_opts; + int enable_quota = 0; ext4_group_t g; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; int err; #ifdef CONFIG_QUOTA int i; #endif + char *orig_data = kstrdup(data, GFP_KERNEL); lock_kernel(); @@ -3630,6 +3636,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } if (*flags & MS_RDONLY) { + err = dquot_suspend(sb, -1); + if (err < 0) + goto restore_opts; + /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount @@ -3698,6 +3708,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; if (!ext4_setup_super(sb, es, 0)) sb->s_flags &= ~MS_RDONLY; + enable_quota = 1; } } ext4_setup_system_zone(sb); @@ -3713,6 +3724,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) #endif unlock_super(sb); unlock_kernel(); + if (enable_quota) + dquot_resume(sb, -1); + + ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); + kfree(orig_data); return 0; restore_opts: @@ -3734,6 +3750,7 @@ restore_opts: #endif unlock_super(sb); unlock_kernel(); + kfree(orig_data); return err; } @@ -3906,24 +3923,21 @@ static int ext4_write_info(struct super_block *sb, int type) */ static int ext4_quota_on_mount(struct super_block *sb, int type) { - return vfs_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type], - EXT4_SB(sb)->s_jquota_fmt, type); + return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type], + EXT4_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int ext4_quota_on(struct super_block *sb, int type, int format_id, - char *name, int remount) + char *name) { int err; struct path path; if (!test_opt(sb, QUOTA)) return -EINVAL; - /* When remounting, no checks are needed and in fact, name is NULL */ - if (remount) - return vfs_quota_on(sb, type, format_id, name, remount); err = kern_path(name, LOOKUP_FOLLOW, &path); if (err) @@ -3962,7 +3976,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, } } - err = vfs_quota_on_path(sb, type, format_id, &path); + err = dquot_quota_on_path(sb, type, format_id, &path); path_put(&path); return err; } @@ -4141,6 +4155,7 @@ static int __init init_ext4_fs(void) { int err; + ext4_check_flag_values(); err = init_ext4_system_zone(); if (err) return err; diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 00740cb32be3..ed9354aff279 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -34,6 +34,7 @@ const struct inode_operations ext4_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, + .setattr = ext4_setattr, #ifdef CONFIG_EXT4_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, @@ -45,6 +46,7 @@ const struct inode_operations ext4_symlink_inode_operations = { const struct 
inode_operations ext4_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ext4_follow_link, + .setattr = ext4_setattr, #ifdef CONFIG_EXT4_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 2de0e9515089..04338009793a 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -228,9 +228,8 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name, atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext4_xattr_check_block(bh)) { bad_block: - ext4_error(inode->i_sb, - "inode %lu: bad block %llu", inode->i_ino, - EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } @@ -372,9 +371,8 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size) ea_bdebug(bh, "b_count=%d, refcount=%d", atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); if (ext4_xattr_check_block(bh)) { - ext4_error(inode->i_sb, - "inode %lu: bad block %llu", inode->i_ino, - EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } @@ -666,8 +664,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i, atomic_read(&(bs->bh->b_count)), le32_to_cpu(BHDR(bs->bh)->h_refcount)); if (ext4_xattr_check_block(bs->bh)) { - ext4_error(sb, "inode %lu: bad block %llu", - inode->i_ino, EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } @@ -820,7 +818,7 @@ inserted: EXT4_I(inode)->i_block_group); /* non-extent files can't have physical blocks past 2^32 */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; block = ext4_new_meta_blocks(handle, inode, @@ -828,7 +826,7 @@ inserted: if (error) goto cleanup; - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); ea_idebug(inode, "creating block %d", block); @@ -880,8 +878,8 @@ cleanup_dquot: goto cleanup; bad_block: - ext4_error(inode->i_sb, "inode %lu: bad block %llu", - inode->i_ino, EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); goto cleanup; #undef header @@ -1194,8 +1192,8 @@ retry: if (!bh) goto cleanup; if (ext4_xattr_check_block(bh)) { - ext4_error(inode->i_sb, "inode %lu: bad block %llu", - inode->i_ino, EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); error = -EIO; goto cleanup; } @@ -1372,14 +1370,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode) goto cleanup; bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl); if (!bh) { - ext4_error(inode->i_sb, "inode %lu: block %llu read error", - inode->i_ino, EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "block %llu read error", + EXT4_I(inode)->i_file_acl); goto cleanup; } if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || BHDR(bh)->h_blocks != cpu_to_le32(1)) { - ext4_error(inode->i_sb, "inode %lu: bad block %llu", - inode->i_ino, EXT4_I(inode)->i_file_acl); + EXT4_ERROR_INODE(inode, "bad block %llu", + EXT4_I(inode)->i_file_acl); goto cleanup; } ext4_xattr_release_block(handle, inode, bh); @@ -1504,9 +1502,8 @@ again: } bh = sb_bread(inode->i_sb, ce->e_block); if (!bh) { - ext4_error(inode->i_sb, - "inode 
%lu: block %lu read error", - inode->i_ino, (unsigned long) ce->e_block); + EXT4_ERROR_INODE(inode, "block %lu read error", + (unsigned long) ce->e_block); } else if (le32_to_cpu(BHDR(bh)->h_refcount) >= EXT4_XATTR_REFCOUNT_MAX) { ea_idebug(inode, "block %lu refcount %d>=%d", diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 53dba57b49a1..27ac25725954 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -306,11 +306,11 @@ extern long fat_generic_ioctl(struct file *filp, unsigned int cmd, extern const struct file_operations fat_file_operations; extern const struct inode_operations fat_file_inode_operations; extern int fat_setattr(struct dentry * dentry, struct iattr * attr); -extern void fat_truncate(struct inode *inode); +extern int fat_setsize(struct inode *inode, loff_t offset); +extern void fat_truncate_blocks(struct inode *inode, loff_t offset); extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -extern int fat_file_fsync(struct file *file, struct dentry *dentry, - int datasync); +extern int fat_file_fsync(struct file *file, int datasync); /* fat/inode.c */ extern void fat_attach(struct inode *inode, loff_t i_pos); diff --git a/fs/fat/file.c b/fs/fat/file.c index a14c2f6a489e..990dfae022e5 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -149,12 +149,12 @@ static int fat_file_release(struct inode *inode, struct file *filp) return 0; } -int fat_file_fsync(struct file *filp, struct dentry *dentry, int datasync) +int fat_file_fsync(struct file *filp, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; int res, err; - res = simple_fsync(filp, dentry, datasync); + res = generic_file_fsync(filp, datasync); err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); return res ? res : err; @@ -283,7 +283,7 @@ static int fat_free(struct inode *inode, int skip) return fat_free_clusters(inode, free_start); } -void fat_truncate(struct inode *inode) +void fat_truncate_blocks(struct inode *inode, loff_t offset) { struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); const unsigned int cluster_size = sbi->cluster_size; @@ -293,10 +293,10 @@ void fat_truncate(struct inode *inode) * This protects against truncating a file bigger than it was then * trying to write into the hole. */ - if (MSDOS_I(inode)->mmu_private > inode->i_size) - MSDOS_I(inode)->mmu_private = inode->i_size; + if (MSDOS_I(inode)->mmu_private > offset) + MSDOS_I(inode)->mmu_private = offset; - nr_clusters = (inode->i_size + (cluster_size - 1)) >> sbi->cluster_bits; + nr_clusters = (offset + (cluster_size - 1)) >> sbi->cluster_bits; fat_free(inode, nr_clusters); fat_flush_inodes(inode->i_sb, inode, NULL); @@ -364,6 +364,18 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode) return 0; } +int fat_setsize(struct inode *inode, loff_t offset) +{ + int error; + + error = simple_setsize(inode, offset); + if (error) + return error; + fat_truncate_blocks(inode, offset); + + return error; +} + #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET) /* valid file mode bits */ #define FAT_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXUGO) @@ -378,7 +390,8 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr) /* * Expand the file. Since inode_setattr() updates ->i_size * before calling the ->truncate(), but FAT needs to fill the - * hole before it. + * hole before it. XXX: this is no longer true with new truncate + * sequence. 
*/ if (attr->ia_valid & ATTR_SIZE) { if (attr->ia_size > inode->i_size) { @@ -427,15 +440,20 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr) attr->ia_valid &= ~ATTR_MODE; } - if (attr->ia_valid) - error = inode_setattr(inode, attr); + if (attr->ia_valid & ATTR_SIZE) { + error = fat_setsize(inode, attr->ia_size); + if (error) + goto out; + } + + generic_setattr(inode, attr); + mark_inode_dirty(inode); out: return error; } EXPORT_SYMBOL_GPL(fat_setattr); const struct inode_operations fat_file_inode_operations = { - .truncate = fat_truncate, .setattr = fat_setattr, .getattr = fat_getattr, }; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index ed33904926ee..7bf45aee56d7 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -142,14 +142,29 @@ static int fat_readpages(struct file *file, struct address_space *mapping, return mpage_readpages(mapping, pages, nr_pages, fat_get_block); } +static void fat_write_failed(struct address_space *mapping, loff_t to) +{ + struct inode *inode = mapping->host; + + if (to > inode->i_size) { + truncate_pagecache(inode, to, inode->i_size); + fat_truncate_blocks(inode, inode->i_size); + } +} + static int fat_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { + int err; + *pagep = NULL; - return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, - fat_get_block, + err = cont_write_begin_newtrunc(file, mapping, pos, len, flags, + pagep, fsdata, fat_get_block, &MSDOS_I(mapping->host)->mmu_private); + if (err < 0) + fat_write_failed(mapping, pos + len); + return err; } static int fat_write_end(struct file *file, struct address_space *mapping, @@ -159,6 +174,8 @@ static int fat_write_end(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; int err; err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); + if (err < len) + fat_write_failed(mapping, pos + len); if (!(err < 0) && !(MSDOS_I(inode)->i_attrs & ATTR_ARCH)) { inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; MSDOS_I(inode)->i_attrs |= ATTR_ARCH; @@ -172,7 +189,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t ret; if (rw == WRITE) { /* @@ -193,8 +212,12 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, * FAT need to use the DIO_LOCKING for avoiding the race * condition of fat_get_block() and ->truncate(). 
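fat_truncate_blocks() above keeps every whole cluster needed to cover the new size and frees the chain from the next cluster on, so the byte offset is rounded up to a cluster boundary before the free starts. A tiny sketch of that arithmetic with made-up cluster parameters:

#include <stdio.h>

int main(void)
{
        unsigned int cluster_bits = 12;                 /* 4 KiB clusters */
        unsigned long long cluster_size = 1ULL << cluster_bits;
        unsigned long long offset = 10000;              /* new file size */
        unsigned long long nr_clusters;

        /* round the new size up to whole clusters; free from there on */
        nr_clusters = (offset + (cluster_size - 1)) >> cluster_bits;
        printf("keep clusters 0..%llu, free from cluster index %llu\n",
               nr_clusters - 1, nr_clusters);
        return 0;
}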
*/ - return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, fat_get_block, NULL); + ret = blockdev_direct_IO_newtrunc(rw, iocb, inode, inode->i_sb->s_bdev, + iov, offset, nr_segs, fat_get_block, NULL); + if (ret < 0 && (rw & WRITE)) + fat_write_failed(mapping, offset + iov_length(iov, nr_segs)); + + return ret; } static sector_t _fat_bmap(struct address_space *mapping, sector_t block) @@ -429,7 +452,7 @@ static void fat_delete_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); inode->i_size = 0; - fat_truncate(inode); + fat_truncate_blocks(inode, 0); clear_inode(inode); } diff --git a/fs/file_table.c b/fs/file_table.c index 32d12b78bac8..5c7d10ead4ad 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -194,14 +194,6 @@ struct file *alloc_file(struct path *path, fmode_t mode, } EXPORT_SYMBOL(alloc_file); -void fput(struct file *file) -{ - if (atomic_long_dec_and_test(&file->f_count)) - __fput(file); -} - -EXPORT_SYMBOL(fput); - /** * drop_file_write_access - give up ability to write to a file * @file: the file to which we will stop writing @@ -227,10 +219,9 @@ void drop_file_write_access(struct file *file) } EXPORT_SYMBOL_GPL(drop_file_write_access); -/* __fput is called from task context when aio completion releases the last - * last use of a struct file *. Do not use otherwise. +/* the real guts of fput() - releasing the last reference to file */ -void __fput(struct file *file) +static void __fput(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct vfsmount *mnt = file->f_path.mnt; @@ -268,6 +259,14 @@ void __fput(struct file *file) mntput(mnt); } +void fput(struct file *file) +{ + if (atomic_long_dec_and_test(&file->f_count)) + __fput(file); +} + +EXPORT_SYMBOL(fput); + struct file *fget(unsigned int fd) { struct file *file; diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index aee049cb9f84..0ec7bb2c95c6 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -57,6 +57,8 @@ const struct inode_operations vxfs_dir_inode_ops = { }; const struct file_operations vxfs_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, .readdir = vxfs_readdir, }; diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 47aefd376e54..723b889fd219 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -710,30 +710,26 @@ static void fscache_write_op(struct fscache_operation *_op) goto superseded; } - if (page) { - radix_tree_tag_set(&cookie->stores, page->index, - FSCACHE_COOKIE_STORING_TAG); - radix_tree_tag_clear(&cookie->stores, page->index, - FSCACHE_COOKIE_PENDING_TAG); - } + radix_tree_tag_set(&cookie->stores, page->index, + FSCACHE_COOKIE_STORING_TAG); + radix_tree_tag_clear(&cookie->stores, page->index, + FSCACHE_COOKIE_PENDING_TAG); spin_unlock(&cookie->stores_lock); spin_unlock(&object->lock); - if (page) { - fscache_set_op_state(&op->op, "Store"); - fscache_stat(&fscache_n_store_pages); - fscache_stat(&fscache_n_cop_write_page); - ret = object->cache->ops->write_page(op, page); - fscache_stat_d(&fscache_n_cop_write_page); - fscache_set_op_state(&op->op, "EndWrite"); - fscache_end_page_write(object, page); - if (ret < 0) { - fscache_set_op_state(&op->op, "Abort"); - fscache_abort_object(object); - } else { - fscache_enqueue_operation(&op->op); - } + fscache_set_op_state(&op->op, "Store"); + fscache_stat(&fscache_n_store_pages); + fscache_stat(&fscache_n_cop_write_page); + ret = object->cache->ops->write_page(op, page); + fscache_stat_d(&fscache_n_cop_write_page); 
+ fscache_set_op_state(&op->op, "EndWrite"); + fscache_end_page_write(object, page); + if (ret < 0) { + fscache_set_op_state(&op->op, "Abort"); + fscache_abort_object(object); + } else { + fscache_enqueue_operation(&op->op); } _leave(""); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index e53df5ebb2b8..9424796d6634 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); @@ -499,6 +502,9 @@ struct fuse_copy_state { int write; struct fuse_req *req; const struct iovec *iov; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; unsigned long nr_segs; unsigned long seglen; unsigned long addr; @@ -506,16 +512,16 @@ struct fuse_copy_state { void *mapaddr; void *buf; unsigned len; + unsigned move_pages:1; }; static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, - int write, struct fuse_req *req, + int write, const struct iovec *iov, unsigned long nr_segs) { memset(cs, 0, sizeof(*cs)); cs->fc = fc; cs->write = write; - cs->req = req; cs->iov = iov; cs->nr_segs = nr_segs; } @@ -523,7 +529,18 @@ static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc, /* Unmap and put previous page of userspace buffer */ static void fuse_copy_finish(struct fuse_copy_state *cs) { - if (cs->mapaddr) { + if (cs->currbuf) { + struct pipe_buffer *buf = cs->currbuf; + + if (!cs->write) { + buf->ops->unmap(cs->pipe, buf, cs->mapaddr); + } else { + kunmap_atomic(cs->mapaddr, KM_USER0); + buf->len = PAGE_SIZE - cs->len; + } + cs->currbuf = NULL; + cs->mapaddr = NULL; + } else if (cs->mapaddr) { kunmap_atomic(cs->mapaddr, KM_USER0); if (cs->write) { flush_dcache_page(cs->pg); @@ -545,26 +562,61 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) unlock_request(cs->fc, cs->req); fuse_copy_finish(cs); - if (!cs->seglen) { - BUG_ON(!cs->nr_segs); - cs->seglen = cs->iov[0].iov_len; - cs->addr = (unsigned long) cs->iov[0].iov_base; - cs->iov++; - cs->nr_segs--; + if (cs->pipebufs) { + struct pipe_buffer *buf = cs->pipebufs; + + if (!cs->write) { + err = buf->ops->confirm(cs->pipe, buf); + if (err) + return err; + + BUG_ON(!cs->nr_segs); + cs->currbuf = buf; + cs->mapaddr = buf->ops->map(cs->pipe, buf, 1); + cs->len = buf->len; + cs->buf = cs->mapaddr + buf->offset; + cs->pipebufs++; + cs->nr_segs--; + } else { + struct page *page; + + if (cs->nr_segs == cs->pipe->buffers) + return -EIO; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return -ENOMEM; + + buf->page = page; + buf->offset = 0; + buf->len = 0; + + cs->currbuf = buf; + cs->mapaddr = kmap_atomic(page, KM_USER0); + cs->buf = cs->mapaddr; + cs->len = PAGE_SIZE; + cs->pipebufs++; + cs->nr_segs++; + } + } else { + if (!cs->seglen) { + BUG_ON(!cs->nr_segs); + cs->seglen = cs->iov[0].iov_len; + cs->addr = (unsigned long) cs->iov[0].iov_base; + cs->iov++; + cs->nr_segs--; + } + err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); + if (err < 0) + return err; + BUG_ON(err != 1); + offset = cs->addr % PAGE_SIZE; + cs->mapaddr = kmap_atomic(cs->pg, KM_USER0); + cs->buf = cs->mapaddr + offset; + cs->len = min(PAGE_SIZE - offset, cs->seglen); + cs->seglen -= cs->len; + cs->addr += cs->len; } - down_read(¤t->mm->mmap_sem); - err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0, - &cs->pg, NULL); - up_read(¤t->mm->mmap_sem); - if (err < 0) - return err; - BUG_ON(err != 1); - offset = cs->addr % PAGE_SIZE; - cs->mapaddr = kmap_atomic(cs->pg, 
KM_USER0); - cs->buf = cs->mapaddr + offset; - cs->len = min(PAGE_SIZE - offset, cs->seglen); - cs->seglen -= cs->len; - cs->addr += cs->len; return lock_request(cs->fc, cs->req); } @@ -586,23 +638,178 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) return ncpy; } +static int fuse_check_page(struct page *page) +{ + if (page_mapcount(page) || + page->mapping != NULL || + page_count(page) != 1 || + (page->flags & PAGE_FLAGS_CHECK_AT_PREP & + ~(1 << PG_locked | + 1 << PG_referenced | + 1 << PG_uptodate | + 1 << PG_lru | + 1 << PG_active | + 1 << PG_reclaim))) { + printk(KERN_WARNING "fuse: trying to steal weird page\n"); + printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); + return 1; + } + return 0; +} + +static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) +{ + int err; + struct page *oldpage = *pagep; + struct page *newpage; + struct pipe_buffer *buf = cs->pipebufs; + struct address_space *mapping; + pgoff_t index; + + unlock_request(cs->fc, cs->req); + fuse_copy_finish(cs); + + err = buf->ops->confirm(cs->pipe, buf); + if (err) + return err; + + BUG_ON(!cs->nr_segs); + cs->currbuf = buf; + cs->len = buf->len; + cs->pipebufs++; + cs->nr_segs--; + + if (cs->len != PAGE_SIZE) + goto out_fallback; + + if (buf->ops->steal(cs->pipe, buf) != 0) + goto out_fallback; + + newpage = buf->page; + + if (WARN_ON(!PageUptodate(newpage))) + return -EIO; + + ClearPageMappedToDisk(newpage); + + if (fuse_check_page(newpage) != 0) + goto out_fallback_unlock; + + mapping = oldpage->mapping; + index = oldpage->index; + + /* + * This is a new and locked page, it shouldn't be mapped or + * have any special flags on it + */ + if (WARN_ON(page_mapped(oldpage))) + goto out_fallback_unlock; + if (WARN_ON(page_has_private(oldpage))) + goto out_fallback_unlock; + if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage))) + goto out_fallback_unlock; + if (WARN_ON(PageMlocked(oldpage))) + goto out_fallback_unlock; + + remove_from_page_cache(oldpage); + page_cache_release(oldpage); + + err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL); + if (err) { + printk(KERN_WARNING "fuse_try_move_page: failed to add page"); + goto out_fallback_unlock; + } + page_cache_get(newpage); + + if (!(buf->flags & PIPE_BUF_FLAG_LRU)) + lru_cache_add_file(newpage); + + err = 0; + spin_lock(&cs->fc->lock); + if (cs->req->aborted) + err = -ENOENT; + else + *pagep = newpage; + spin_unlock(&cs->fc->lock); + + if (err) { + unlock_page(newpage); + page_cache_release(newpage); + return err; + } + + unlock_page(oldpage); + page_cache_release(oldpage); + cs->len = 0; + + return 0; + +out_fallback_unlock: + unlock_page(newpage); +out_fallback: + cs->mapaddr = buf->ops->map(cs->pipe, buf, 1); + cs->buf = cs->mapaddr + buf->offset; + + err = lock_request(cs->fc, cs->req); + if (err) + return err; + + return 1; +} + +static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, + unsigned offset, unsigned count) +{ + struct pipe_buffer *buf; + + if (cs->nr_segs == cs->pipe->buffers) + return -EIO; + + unlock_request(cs->fc, cs->req); + fuse_copy_finish(cs); + + buf = cs->pipebufs; + page_cache_get(page); + buf->page = page; + buf->offset = offset; + buf->len = count; + + cs->pipebufs++; + cs->nr_segs++; + cs->len = 0; + + return 0; +} + /* * Copy a page in the request to/from the userspace buffer. 
Must be * done atomically */ -static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, +static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, unsigned offset, unsigned count, int zeroing) { + int err; + struct page *page = *pagep; + if (page && zeroing && count < PAGE_SIZE) { void *mapaddr = kmap_atomic(page, KM_USER1); memset(mapaddr, 0, PAGE_SIZE); kunmap_atomic(mapaddr, KM_USER1); } while (count) { - if (!cs->len) { - int err = fuse_copy_fill(cs); - if (err) - return err; + if (cs->write && cs->pipebufs && page) { + return fuse_ref_page(cs, page, offset, count); + } else if (!cs->len) { + if (cs->move_pages && page && + offset == 0 && count == PAGE_SIZE) { + err = fuse_try_move_page(cs, pagep); + if (err <= 0) + return err; + } else { + err = fuse_copy_fill(cs); + if (err) + return err; + } } if (page) { void *mapaddr = kmap_atomic(page, KM_USER1); @@ -627,8 +834,10 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset); for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { - struct page *page = req->pages[i]; - int err = fuse_copy_page(cs, page, offset, count, zeroing); + int err; + + err = fuse_copy_page(cs, &req->pages[i], offset, count, + zeroing); if (err) return err; @@ -705,11 +914,10 @@ __acquires(&fc->lock) * * Called with fc->lock held, releases it */ -static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req, - const struct iovec *iov, unsigned long nr_segs) +static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, + size_t nbytes, struct fuse_req *req) __releases(&fc->lock) { - struct fuse_copy_state cs; struct fuse_in_header ih; struct fuse_interrupt_in arg; unsigned reqsize = sizeof(ih) + sizeof(arg); @@ -725,14 +933,13 @@ __releases(&fc->lock) arg.unique = req->in.h.unique; spin_unlock(&fc->lock); - if (iov_length(iov, nr_segs) < reqsize) + if (nbytes < reqsize) return -EINVAL; - fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs); - err = fuse_copy_one(&cs, &ih, sizeof(ih)); + err = fuse_copy_one(cs, &ih, sizeof(ih)); if (!err) - err = fuse_copy_one(&cs, &arg, sizeof(arg)); - fuse_copy_finish(&cs); + err = fuse_copy_one(cs, &arg, sizeof(arg)); + fuse_copy_finish(cs); return err ? err : reqsize; } @@ -746,18 +953,13 @@ __releases(&fc->lock) * request_end(). Otherwise add it to the processing list, and set * the 'sent' flag. 
*/ -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) +static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, + struct fuse_copy_state *cs, size_t nbytes) { int err; struct fuse_req *req; struct fuse_in *in; - struct fuse_copy_state cs; unsigned reqsize; - struct file *file = iocb->ki_filp; - struct fuse_conn *fc = fuse_get_conn(file); - if (!fc) - return -EPERM; restart: spin_lock(&fc->lock); @@ -777,7 +979,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, if (!list_empty(&fc->interrupts)) { req = list_entry(fc->interrupts.next, struct fuse_req, intr_entry); - return fuse_read_interrupt(fc, req, iov, nr_segs); + return fuse_read_interrupt(fc, cs, nbytes, req); } req = list_entry(fc->pending.next, struct fuse_req, list); @@ -787,7 +989,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, in = &req->in; reqsize = in->h.len; /* If request is too large, reply with an error and restart the read */ - if (iov_length(iov, nr_segs) < reqsize) { + if (nbytes < reqsize) { req->out.h.error = -EIO; /* SETXATTR is special, since it may contain too large data */ if (in->h.opcode == FUSE_SETXATTR) @@ -796,12 +998,12 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, goto restart; } spin_unlock(&fc->lock); - fuse_copy_init(&cs, fc, 1, req, iov, nr_segs); - err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); + cs->req = req; + err = fuse_copy_one(cs, &in->h, sizeof(in->h)); if (!err) - err = fuse_copy_args(&cs, in->numargs, in->argpages, + err = fuse_copy_args(cs, in->numargs, in->argpages, (struct fuse_arg *) in->args, 0); - fuse_copy_finish(&cs); + fuse_copy_finish(cs); spin_lock(&fc->lock); req->locked = 0; if (req->aborted) { @@ -829,6 +1031,110 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, return err; } +static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct fuse_copy_state cs; + struct file *file = iocb->ki_filp; + struct fuse_conn *fc = fuse_get_conn(file); + if (!fc) + return -EPERM; + + fuse_copy_init(&cs, fc, 1, iov, nr_segs); + + return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs)); +} + +static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + return 1; +} + +static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = { + .can_merge = 0, + .map = generic_pipe_buf_map, + .unmap = generic_pipe_buf_unmap, + .confirm = generic_pipe_buf_confirm, + .release = generic_pipe_buf_release, + .steal = fuse_dev_pipe_buf_steal, + .get = generic_pipe_buf_get, +}; + +static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, + size_t len, unsigned int flags) +{ + int ret; + int page_nr = 0; + int do_wakeup = 0; + struct pipe_buffer *bufs; + struct fuse_copy_state cs; + struct fuse_conn *fc = fuse_get_conn(in); + if (!fc) + return -EPERM; + + bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL); + if (!bufs) + return -ENOMEM; + + fuse_copy_init(&cs, fc, 1, NULL, 0); + cs.pipebufs = bufs; + cs.pipe = pipe; + ret = fuse_dev_do_read(fc, in, &cs, len); + if (ret < 0) + goto out; + + ret = 0; + pipe_lock(pipe); + + if (!pipe->readers) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; + goto out_unlock; + } + + if (pipe->nrbufs + cs.nr_segs > pipe->buffers) { + ret = -EIO; + goto out_unlock; + } + + while (page_nr < cs.nr_segs) { + int 
newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); + struct pipe_buffer *buf = pipe->bufs + newbuf; + + buf->page = bufs[page_nr].page; + buf->offset = bufs[page_nr].offset; + buf->len = bufs[page_nr].len; + buf->ops = &fuse_dev_pipe_buf_ops; + + pipe->nrbufs++; + page_nr++; + ret += buf->len; + + if (pipe->inode) + do_wakeup = 1; + } + +out_unlock: + pipe_unlock(pipe); + + if (do_wakeup) { + smp_mb(); + if (waitqueue_active(&pipe->wait)) + wake_up_interruptible(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + } + +out: + for (; page_nr < cs.nr_segs; page_nr++) + page_cache_release(bufs[page_nr].page); + + kfree(bufs); + return ret; +} + static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, struct fuse_copy_state *cs) { @@ -988,23 +1294,17 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, * it from the list and copy the rest of the buffer to the request. * The request is finished by calling request_end() */ -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) +static ssize_t fuse_dev_do_write(struct fuse_conn *fc, + struct fuse_copy_state *cs, size_t nbytes) { int err; - size_t nbytes = iov_length(iov, nr_segs); struct fuse_req *req; struct fuse_out_header oh; - struct fuse_copy_state cs; - struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); - if (!fc) - return -EPERM; - fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs); if (nbytes < sizeof(struct fuse_out_header)) return -EINVAL; - err = fuse_copy_one(&cs, &oh, sizeof(oh)); + err = fuse_copy_one(cs, &oh, sizeof(oh)); if (err) goto err_finish; @@ -1017,7 +1317,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, * and error contains notification code. */ if (!oh.unique) { - err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); + err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); return err ? 
err : nbytes; } @@ -1036,7 +1336,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, if (req->aborted) { spin_unlock(&fc->lock); - fuse_copy_finish(&cs); + fuse_copy_finish(cs); spin_lock(&fc->lock); request_end(fc, req); return -ENOENT; @@ -1053,7 +1353,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, queue_interrupt(fc, req); spin_unlock(&fc->lock); - fuse_copy_finish(&cs); + fuse_copy_finish(cs); return nbytes; } @@ -1061,11 +1361,13 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, list_move(&req->list, &fc->io); req->out.h = oh; req->locked = 1; - cs.req = req; + cs->req = req; + if (!req->out.page_replace) + cs->move_pages = 0; spin_unlock(&fc->lock); - err = copy_out_args(&cs, &req->out, nbytes); - fuse_copy_finish(&cs); + err = copy_out_args(cs, &req->out, nbytes); + fuse_copy_finish(cs); spin_lock(&fc->lock); req->locked = 0; @@ -1081,10 +1383,101 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, err_unlock: spin_unlock(&fc->lock); err_finish: - fuse_copy_finish(&cs); + fuse_copy_finish(cs); return err; } +static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct fuse_copy_state cs; + struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); + if (!fc) + return -EPERM; + + fuse_copy_init(&cs, fc, 0, iov, nr_segs); + + return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs)); +} + +static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + struct file *out, loff_t *ppos, + size_t len, unsigned int flags) +{ + unsigned nbuf; + unsigned idx; + struct pipe_buffer *bufs; + struct fuse_copy_state cs; + struct fuse_conn *fc; + size_t rem; + ssize_t ret; + + fc = fuse_get_conn(out); + if (!fc) + return -EPERM; + + bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL); + if (!bufs) + return -ENOMEM; + + pipe_lock(pipe); + nbuf = 0; + rem = 0; + for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) + rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; + + ret = -EINVAL; + if (rem < len) { + pipe_unlock(pipe); + goto out; + } + + rem = len; + while (rem) { + struct pipe_buffer *ibuf; + struct pipe_buffer *obuf; + + BUG_ON(nbuf >= pipe->buffers); + BUG_ON(!pipe->nrbufs); + ibuf = &pipe->bufs[pipe->curbuf]; + obuf = &bufs[nbuf]; + + if (rem >= ibuf->len) { + *obuf = *ibuf; + ibuf->ops = NULL; + pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); + pipe->nrbufs--; + } else { + ibuf->ops->get(pipe, ibuf); + *obuf = *ibuf; + obuf->flags &= ~PIPE_BUF_FLAG_GIFT; + obuf->len = rem; + ibuf->offset += obuf->len; + ibuf->len -= obuf->len; + } + nbuf++; + rem -= obuf->len; + } + pipe_unlock(pipe); + + fuse_copy_init(&cs, fc, 0, NULL, nbuf); + cs.pipebufs = bufs; + cs.pipe = pipe; + + if (flags & SPLICE_F_MOVE) + cs.move_pages = 1; + + ret = fuse_dev_do_write(fc, &cs, len); + + for (idx = 0; idx < nbuf; idx++) { + struct pipe_buffer *buf = &bufs[idx]; + buf->ops->release(pipe, buf); + } +out: + kfree(bufs); + return ret; +} + static unsigned fuse_dev_poll(struct file *file, poll_table *wait) { unsigned mask = POLLOUT | POLLWRNORM; @@ -1226,8 +1619,10 @@ const struct file_operations fuse_dev_operations = { .llseek = no_llseek, .read = do_sync_read, .aio_read = fuse_dev_read, + .splice_read = fuse_dev_splice_read, .write = do_sync_write, .aio_write = fuse_dev_write, + .splice_write = fuse_dev_splice_write, .poll = fuse_dev_poll, .release = fuse_dev_release, .fasync = fuse_dev_fasync, 
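With the fuse_dev_operations change above, /dev/fuse now exposes splice_read and splice_write, so a userspace filesystem daemon can move requests and replies through a pipe instead of copying them through an iovec; SPLICE_F_MOVE on the write side additionally allows page stealing (the move_pages/page_replace path). The sketch below only illustrates the intended userspace usage and is not part of this patch; the descriptor names, buffer size and error handling are assumptions.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/*
 * Illustrative only: pull one FUSE request from the device into a pipe
 * and pass it on to another descriptor without an intermediate copy.
 * fuse_fd, dest_fd and bufsize are assumed to be set up by the daemon.
 */
static ssize_t forward_one_request(int fuse_fd, int dest_fd, size_t bufsize)
{
	int pipefd[2];
	ssize_t n;

	if (pipe(pipefd) == -1)
		return -1;

	/* The device's splice_read fills the pipe with one request. */
	n = splice(fuse_fd, NULL, pipefd[1], NULL, bufsize, SPLICE_F_MOVE);
	if (n > 0)
		/* Forward the buffered request, still without copying. */
		n = splice(pipefd[0], NULL, dest_fd, NULL, n, SPLICE_F_MOVE);

	close(pipefd[0]);
	close(pipefd[1]);
	return n;
}

Whether pages are actually moved rather than copied depends on the fallback paths in fuse_try_move_page() above.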
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4787ae6c5c1c..3cdc5f78a406 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1156,10 +1156,9 @@ static int fuse_dir_release(struct inode *inode, struct file *file) return 0; } -static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync) +static int fuse_dir_fsync(struct file *file, int datasync) { - /* nfsd can call this with no file */ - return file ? fuse_fsync_common(file, de, datasync, 1) : 0; + return fuse_fsync_common(file, datasync, 1); } static bool update_mtime(unsigned ivalid) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a9f5e137f1d3..ada0adeb3bb5 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -351,10 +351,9 @@ static void fuse_sync_writes(struct inode *inode) fuse_release_nowrite(inode); } -int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, - int isdir) +int fuse_fsync_common(struct file *file, int datasync, int isdir) { - struct inode *inode = de->d_inode; + struct inode *inode = file->f_mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_file *ff = file->private_data; struct fuse_req *req; @@ -403,9 +402,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, return err; } -static int fuse_fsync(struct file *file, struct dentry *de, int datasync) +static int fuse_fsync(struct file *file, int datasync) { - return fuse_fsync_common(file, de, datasync, 0); + return fuse_fsync_common(file, datasync, 0); } void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, @@ -517,17 +516,26 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) int i; size_t count = req->misc.read.in.size; size_t num_read = req->out.args[0].size; - struct inode *inode = req->pages[0]->mapping->host; + struct address_space *mapping = NULL; - /* - * Short read means EOF. If file size is larger, truncate it - */ - if (!req->out.h.error && num_read < count) { - loff_t pos = page_offset(req->pages[0]) + num_read; - fuse_read_update_size(inode, pos, req->misc.read.attr_ver); - } + for (i = 0; mapping == NULL && i < req->num_pages; i++) + mapping = req->pages[i]->mapping; - fuse_invalidate_attr(inode); /* atime changed */ + if (mapping) { + struct inode *inode = mapping->host; + + /* + * Short read means EOF. 
If file size is larger, truncate it + */ + if (!req->out.h.error && num_read < count) { + loff_t pos; + + pos = page_offset(req->pages[0]) + num_read; + fuse_read_update_size(inode, pos, + req->misc.read.attr_ver); + } + fuse_invalidate_attr(inode); /* atime changed */ + } for (i = 0; i < req->num_pages; i++) { struct page *page = req->pages[i]; @@ -536,6 +544,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) else SetPageError(page); unlock_page(page); + page_cache_release(page); } if (req->ff) fuse_file_put(req->ff); @@ -550,6 +559,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file) req->out.argpages = 1; req->out.page_zeroing = 1; + req->out.page_replace = 1; fuse_read_fill(req, file, pos, count, FUSE_READ); req->misc.read.attr_ver = fuse_get_attr_version(fc); if (fc->async_read) { @@ -589,6 +599,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) return PTR_ERR(req); } } + page_cache_get(page); req->pages[req->num_pages] = page; req->num_pages++; return 0; @@ -994,10 +1005,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf, nbytes = min_t(size_t, nbytes, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ); - down_read(&current->mm->mmap_sem); - npages = get_user_pages(current, current->mm, user_addr, npages, !write, - 0, req->pages, NULL); - up_read(&current->mm->mmap_sem); + npages = get_user_pages_fast(user_addr, npages, !write, req->pages); if (npages < 0) return npages; @@ -1580,9 +1588,9 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, while (iov_iter_count(&ii)) { struct page *page = pages[page_idx++]; size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); - void *kaddr, *map; + void *kaddr; - kaddr = map = kmap(page); + kaddr = kmap(page); while (todo) { char __user *uaddr = ii.iov->iov_base + ii.iov_offset; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 01cc462ff45d..8f309f04064e 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -177,6 +177,9 @@ struct fuse_out { /** Zero partially or not copied pages */ unsigned page_zeroing:1; + /** Pages may be replaced with new ones */ + unsigned page_replace:1; + /** Number or arguments */ unsigned numargs; @@ -568,8 +571,7 @@ void fuse_release_common(struct file *file, int opcode); /** * Send FSYNC or FSYNCDIR request */ -int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, - int isdir); +int fuse_fsync_common(struct file *file, int datasync, int isdir); /** * Notify poll wakeup diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index a739a0a48067..9f8b52500d63 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -700,8 +700,14 @@ out: return 0; page_cache_release(page); + + /* + * XXX(hch): the call below should probably be replaced with + * a call to the gfs2-specific truncate blocks helper to actually + * release disk blocks.. 
+ */ if (pos + len > ip->i_inode.i_size) - vmtruncate(&ip->i_inode, ip->i_inode.i_size); + simple_setsize(&ip->i_inode, ip->i_inode.i_size); out_endtrans: gfs2_trans_end(sdp); out_trans_fail: diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index b20bfcc9fa2d..ed9a94f0ef15 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -554,9 +554,9 @@ static int gfs2_close(struct inode *inode, struct file *file) * Returns: errno */ -static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync) +static int gfs2_fsync(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); int ret = 0; diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 4e64352d49de..98cdd05f3316 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -1071,6 +1071,9 @@ int gfs2_permission(struct inode *inode, int mask) return error; } +/* + * XXX: should be changed to have proper ordering by opencoding simple_setsize + */ static int setattr_size(struct inode *inode, struct iattr *attr) { struct gfs2_inode *ip = GFS2_I(inode); @@ -1081,7 +1084,7 @@ static int setattr_size(struct inode *inode, struct iattr *attr) error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); if (error) return error; - error = vmtruncate(inode, attr->ia_size); + error = simple_setsize(inode, attr->ia_size); gfs2_trans_end(sdp); if (error) return error; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 3a029d8f4cf1..87ac1891a185 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -411,9 +411,9 @@ int hostfs_file_open(struct inode *ino, struct file *file) return 0; } -int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync) +int hostfs_fsync(struct file *file, int datasync) { - return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync); + return fsync_file(HOSTFS_I(file->f_mapping->host)->fd, datasync); } static const struct file_operations hostfs_file_fops = { diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 3efabff00367..a9ae9bfa752f 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -19,9 +19,9 @@ static int hpfs_file_release(struct inode *inode, struct file *file) return 0; } -int hpfs_file_fsync(struct file *file, struct dentry *dentry, int datasync) +int hpfs_file_fsync(struct file *file, int datasync) { - /*return file_fsync(file, dentry);*/ + /*return file_fsync(file, datasync);*/ return 0; /* Don't fsync :-) */ } diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 97bf738cd5d6..75f9d4324851 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -268,7 +268,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, const char *, /* file.c */ -int hpfs_file_fsync(struct file *, struct dentry *, int); +int hpfs_file_fsync(struct file *, int); extern const struct file_operations hpfs_file_ops; extern const struct inode_operations hpfs_file_iops; extern const struct address_space_operations hpfs_aops; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 2e4dfa8593da..826c3f9d29ac 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -587,7 +587,7 @@ static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) return err; } -static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync) +static int hppfs_fsync(struct file *file, int datasync) { return 0; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a0bbd3d1b41a..a4e9a7ec3691 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ 
-688,7 +688,7 @@ static void init_once(void *foo) const struct file_operations hugetlbfs_file_operations = { .read = hugetlbfs_read, .mmap = hugetlbfs_file_mmap, - .fsync = simple_sync_file, + .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, }; diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index b9ab69b3a482..e0aca9a0ac68 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -272,6 +272,7 @@ static int isofs_readdir(struct file *filp, const struct file_operations isofs_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = isofs_readdir, }; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index bfc70f57900f..e214d68620ac 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1311,7 +1311,6 @@ int jbd2_journal_stop(handle_t *handle) if (handle->h_sync) transaction->t_synchronous_commit = 1; current->journal_info = NULL; - spin_lock(&journal->j_state_lock); spin_lock(&transaction->t_handle_lock); transaction->t_outstanding_credits -= handle->h_buffer_credits; transaction->t_updates--; @@ -1340,8 +1339,7 @@ int jbd2_journal_stop(handle_t *handle) jbd_debug(2, "transaction too old, requesting commit for " "handle %p\n", handle); /* This is non-blocking */ - __jbd2_log_start_commit(journal, transaction->t_tid); - spin_unlock(&journal->j_state_lock); + jbd2_log_start_commit(journal, transaction->t_tid); /* * Special case: JBD2_SYNC synchronous updates require us @@ -1351,7 +1349,6 @@ int jbd2_journal_stop(handle_t *handle) err = jbd2_log_wait_commit(journal, tid); } else { spin_unlock(&transaction->t_handle_lock); - spin_unlock(&journal->j_state_lock); } lock_map_release(&handle->h_lockdep_map); diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index e7291c161a19..813497024437 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -26,9 +26,9 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page **pagep, void **fsdata); static int jffs2_readpage (struct file *filp, struct page *pg); -int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) +int jffs2_fsync(struct file *filp, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); /* Trigger GC to flush any pending writes for this inode */ diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 86e0821fc989..8bc2c80ab159 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -169,13 +169,13 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) mutex_unlock(&f->sem); jffs2_complete_reservation(c); - /* We have to do the vmtruncate() without f->sem held, since + /* We have to do the simple_setsize() without f->sem held, since some pages may be locked and waiting for it in readpage(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. 
*/ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { - vmtruncate(inode, iattr->ia_size); + simple_setsize(inode, iattr->ia_size); inode->i_blocks = (inode->i_size + 511) >> 9; } diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index 035a767f958b..4791aacf3084 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -158,7 +158,7 @@ extern const struct inode_operations jffs2_dir_inode_operations; extern const struct file_operations jffs2_file_operations; extern const struct inode_operations jffs2_file_inode_operations; extern const struct address_space_operations jffs2_file_address_operations; -int jffs2_fsync(struct file *, struct dentry *, int); +int jffs2_fsync(struct file *, int); int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); /* ioctl.c */ diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 85d9ec659225..127263cc8657 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -27,9 +27,9 @@ #include "jfs_acl.h" #include "jfs_debug.h" -int jfs_fsync(struct file *file, struct dentry *dentry, int datasync) +int jfs_fsync(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; int rc = 0; if (!(inode->i_state & I_DIRTY) || diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index 9e6bda30a6e8..11042b1f44b5 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -21,7 +21,7 @@ struct fid; extern struct inode *ialloc(struct inode *, umode_t); -extern int jfs_fsync(struct file *, struct dentry *, int); +extern int jfs_fsync(struct file *, int); extern long jfs_ioctl(struct file *, unsigned int, unsigned long); extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); extern struct inode *jfs_iget(struct super_block *, unsigned long); diff --git a/fs/jfs/super.c b/fs/jfs/super.c index b66832ac33ac..b38f96bef829 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -179,6 +179,8 @@ static void jfs_put_super(struct super_block *sb) jfs_info("In jfs_put_super"); + dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + lock_kernel(); rc = jfs_umount(sb); @@ -396,10 +398,20 @@ static int jfs_remount(struct super_block *sb, int *flags, char *data) JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); + + /* mark the fs r/w for quota activity */ + sb->s_flags &= ~MS_RDONLY; + unlock_kernel(); + dquot_resume(sb, -1); return ret; } if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) { + rc = dquot_suspend(sb, -1); + if (rc < 0) { + unlock_kernel(); + return rc; + } rc = jfs_umount_rw(sb); JFS_SBI(sb)->flag = flag; unlock_kernel(); @@ -469,6 +481,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) */ sb->s_op = &jfs_super_operations; sb->s_export_op = &jfs_export_operations; +#ifdef CONFIG_QUOTA + sb->dq_op = &dquot_operations; + sb->s_qcop = &dquot_quotactl_ops; +#endif /* * Initialize direct-mapping inode/address-space diff --git a/fs/libfs.c b/fs/libfs.c index 232bea425b09..09e1016eb774 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -58,11 +59,6 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct na return NULL; } -int simple_sync_file(struct file * file, struct dentry *dentry, int datasync) -{ - return 0; -} - int dcache_dir_open(struct inode *inode, struct file *file) { static struct qstr cursor_name = {.len = 1, .name = "."}; @@ -190,7 +186,7 @@ const struct file_operations simple_dir_operations = { .llseek = dcache_dir_lseek, .read 
= generic_read_dir, .readdir = dcache_readdir, - .fsync = simple_sync_file, + .fsync = noop_fsync, }; const struct inode_operations simple_dir_inode_operations = { @@ -330,6 +326,81 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry, return 0; } +/** + * simple_setsize - handle core mm and vfs requirements for file size change + * @inode: inode + * @newsize: new file size + * + * Returns 0 on success, -error on failure. + * + * simple_setsize must be called with inode_mutex held. + * + * simple_setsize will check that the requested new size is OK (see + * inode_newsize_ok), and then will perform the necessary i_size update + * and pagecache truncation (if necessary). It will be typically be called + * from the filesystem's setattr function when ATTR_SIZE is passed in. + * + * The inode itself must have correct permissions and attributes to allow + * i_size to be changed, this function then just checks that the new size + * requested is valid. + * + * In the case of simple in-memory filesystems with inodes stored solely + * in the inode cache, and file data in the pagecache, nothing more needs + * to be done to satisfy a truncate request. Filesystems with on-disk + * blocks for example will need to free them in the case of truncate, in + * that case it may be easier not to use simple_setsize (but each of its + * components will likely be required at some point to update pagecache + * and inode etc). + */ +int simple_setsize(struct inode *inode, loff_t newsize) +{ + loff_t oldsize; + int error; + + error = inode_newsize_ok(inode, newsize); + if (error) + return error; + + oldsize = inode->i_size; + i_size_write(inode, newsize); + truncate_pagecache(inode, oldsize, newsize); + + return error; +} +EXPORT_SYMBOL(simple_setsize); + +/** + * simple_setattr - setattr for simple in-memory filesystem + * @dentry: dentry + * @iattr: iattr structure + * + * Returns 0 on success, -error on failure. + * + * simple_setattr implements setattr for an in-memory filesystem which + * does not store its own file data or metadata (eg. uses the page cache + * and inode cache as its data store). + */ +int simple_setattr(struct dentry *dentry, struct iattr *iattr) +{ + struct inode *inode = dentry->d_inode; + int error; + + error = inode_change_ok(inode, iattr); + if (error) + return error; + + if (iattr->ia_valid & ATTR_SIZE) { + error = simple_setsize(inode, iattr->ia_size); + if (error) + return error; + } + + generic_setattr(inode, iattr); + + return error; +} +EXPORT_SYMBOL(simple_setattr); + int simple_readpage(struct file *file, struct page *page) { clear_highpage(page); @@ -851,13 +922,22 @@ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, } EXPORT_SYMBOL_GPL(generic_fh_to_parent); -int simple_fsync(struct file *file, struct dentry *dentry, int datasync) +/** + * generic_file_fsync - generic fsync implementation for simple filesystems + * @file: file to synchronize + * @datasync: only synchronize essential metadata if true + * + * This is a generic implementation of the fsync method for simple + * filesystems which track all non-inode metadata in the buffers list + * hanging off the address_space structure. 
+ */ +int generic_file_fsync(struct file *file, int datasync) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, /* metadata-only; caller takes care of data */ }; - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; int err; int ret; @@ -872,7 +952,15 @@ int simple_fsync(struct file *file, struct dentry *dentry, int datasync) ret = err; return ret; } -EXPORT_SYMBOL(simple_fsync); +EXPORT_SYMBOL(generic_file_fsync); + +/* + * No-op implementation of ->fsync for in-memory filesystems. + */ +int noop_fsync(struct file *file, int datasync) +{ + return 0; +} EXPORT_SYMBOL(dcache_dir_close); EXPORT_SYMBOL(dcache_dir_lseek); @@ -895,7 +983,7 @@ EXPORT_SYMBOL(simple_release_fs); EXPORT_SYMBOL(simple_rename); EXPORT_SYMBOL(simple_rmdir); EXPORT_SYMBOL(simple_statfs); -EXPORT_SYMBOL(simple_sync_file); +EXPORT_SYMBOL(noop_fsync); EXPORT_SYMBOL(simple_unlink); EXPORT_SYMBOL(simple_read_from_buffer); EXPORT_SYMBOL(simple_write_to_buffer); diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 0de524071870..abe1cafbd4c2 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -219,9 +219,9 @@ int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, } } -int logfs_fsync(struct file *file, struct dentry *dentry, int datasync) +int logfs_fsync(struct file *file, int datasync) { - struct super_block *sb = dentry->d_inode->i_sb; + struct super_block *sb = file->f_mapping->host->i_sb; logfs_write_anchor(sb); return 0; diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index 1a9db84f8d8f..c838c4d72111 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h @@ -506,7 +506,7 @@ extern const struct address_space_operations logfs_reg_aops; int logfs_readpage(struct file *file, struct page *page); int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); -int logfs_fsync(struct file *file, struct dentry *dentry, int datasync); +int logfs_fsync(struct file *file, int datasync); /* gc.c */ u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec); diff --git a/fs/minix/dir.c b/fs/minix/dir.c index 6198731d7fcd..91969589131c 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -22,7 +22,7 @@ const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = minix_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, }; static inline void dir_put_page(struct page *page) @@ -72,11 +72,8 @@ static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); - if (!IS_ERR(page)) { + if (!IS_ERR(page)) kmap(page); - if (!PageUptodate(page)) - goto fail; - } return page; fail: diff --git a/fs/minix/file.c b/fs/minix/file.c index 3eec3e607a87..d5320ff23faf 100644 --- a/fs/minix/file.c +++ b/fs/minix/file.c @@ -19,7 +19,7 @@ const struct file_operations minix_file_operations = { .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, }; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f23010969369..13487ad16894 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -20,6 +20,9 @@ static inline block_t *i_data(struct inode *inode) return (block_t *)minix_i(inode)->u.i2_data; } +#define DIRCOUNT 7 +#define INDIRCOUNT(sb) (1 << ((sb)->s_blocksize_bits - 2)) + static int 
block_to_path(struct inode * inode, long block, int offsets[DEPTH]) { int n = 0; @@ -34,21 +37,21 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) printk("MINIX-fs: block_to_path: " "block %ld too big on dev %s\n", block, bdevname(sb->s_bdev, b)); - } else if (block < 7) { + } else if (block < DIRCOUNT) { offsets[n++] = block; - } else if ((block -= 7) < 256) { - offsets[n++] = 7; + } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { + offsets[n++] = DIRCOUNT; offsets[n++] = block; - } else if ((block -= 256) < 256*256) { - offsets[n++] = 8; - offsets[n++] = block>>8; - offsets[n++] = block & 255; + } else if ((block -= INDIRCOUNT(sb)) < INDIRCOUNT(sb) * INDIRCOUNT(sb)) { + offsets[n++] = DIRCOUNT + 1; + offsets[n++] = block / INDIRCOUNT(sb); + offsets[n++] = block % INDIRCOUNT(sb); } else { - block -= 256*256; - offsets[n++] = 9; - offsets[n++] = block>>16; - offsets[n++] = (block>>8) & 255; - offsets[n++] = block & 255; + block -= INDIRCOUNT(sb) * INDIRCOUNT(sb); + offsets[n++] = DIRCOUNT + 2; + offsets[n++] = (block / INDIRCOUNT(sb)) / INDIRCOUNT(sb); + offsets[n++] = (block / INDIRCOUNT(sb)) % INDIRCOUNT(sb); + offsets[n++] = block % INDIRCOUNT(sb); } return n; } diff --git a/fs/namei.c b/fs/namei.c index 48e1f60520ea..868d0cb9d473 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1621,6 +1621,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, case LAST_DOTDOT: follow_dotdot(nd); dir = nd->path.dentry; + case LAST_DOT: if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) { if (!dir->d_op->d_revalidate(dir, nd)) { error = -ESTALE; @@ -1628,7 +1629,6 @@ static struct file *do_last(struct nameidata *nd, struct path *path, } } /* fallthrough */ - case LAST_DOT: case LAST_ROOT: if (open_flag & O_CREAT) goto exit; diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 92dde6f8d893..9578cbe0cd58 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -49,6 +49,7 @@ extern int ncp_symlink(struct inode *, struct dentry *, const char *); const struct file_operations ncp_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = ncp_readdir, .unlocked_ioctl = ncp_ioctl, diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index b93870892892..3639cc5cbdae 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -22,7 +22,7 @@ #include #include "ncplib_kernel.h" -static int ncp_fsync(struct file *file, struct dentry *dentry, int datasync) +static int ncp_fsync(struct file *file, int datasync) { return 0; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ee9a179ebdf3..782b431ef91c 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -53,7 +53,7 @@ static int nfs_link(struct dentry *, struct inode *, struct dentry *); static int nfs_mknod(struct inode *, struct dentry *, int, dev_t); static int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -static int nfs_fsync_dir(struct file *, struct dentry *, int); +static int nfs_fsync_dir(struct file *, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); const struct file_operations nfs_dir_operations = { @@ -641,8 +641,10 @@ out: * All directory operations under NFS are synchronous, so fsync() * is a dummy operation. 
*/ -static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync) +static int nfs_fsync_dir(struct file *filp, int datasync) { + struct dentry *dentry = filp->f_path.dentry; + dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n", dentry->d_parent->d_name.name, dentry->d_name.name, datasync); @@ -1741,6 +1743,7 @@ remove_lru_entry: clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); smp_mb__after_clear_bit(); } + spin_unlock(&inode->i_lock); } spin_unlock(&nfs_access_lru_lock); nfs_access_free_list(&head); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index cac96bcc91e4..36a5e74f51b4 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -53,7 +53,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int nfs_file_flush(struct file *, fl_owner_t id); -static int nfs_file_fsync(struct file *, struct dentry *dentry, int datasync); +static int nfs_file_fsync(struct file *, int datasync); static int nfs_check_flags(int flags); static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); @@ -322,8 +322,9 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma) * whether any write errors occurred for this process. */ static int -nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync) +nfs_file_fsync(struct file *file, int datasync) { + struct dentry *dentry = file->f_path.dentry; struct nfs_open_context *ctx = nfs_file_open_context(file); struct inode *inode = dentry->d_inode; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3aea3ca98ab7..91679e2631ee 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode *inode, int how) int res = 0; if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) - goto out; + goto out_mark_dirty; spin_lock(&inode->i_lock); res = nfs_scan_commit(inode, &head, 0, 0); spin_unlock(&inode->i_lock); @@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode *inode, int how) wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, nfs_wait_bit_killable, TASK_KILLABLE); + else + goto out_mark_dirty; } else nfs_commit_clear_lock(NFS_I(inode)); -out: + return res; + /* Note: If we exit without ensuring that the commit is complete, + * we must mark the inode as dirty. Otherwise, future calls to + * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure + * that the data is on the disk. 
+ */ +out_mark_dirty: + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); return res; } @@ -1509,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, struct page *page) }; int ret; - while(PagePrivate(page)) { + for (;;) { wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { ret = nfs_writepage_locked(page, &wbc); if (ret < 0) goto out_error; + continue; } - ret = sync_inode(inode, &wbc); + if (!PagePrivate(page)) + break; + ret = nfs_commit_inode(inode, FLUSH_SYNC); if (ret < 0) goto out_error; } diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 30292df443ce..c9a30d7ff6fc 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -27,7 +27,7 @@ #include "nilfs.h" #include "segment.h" -int nilfs_sync_file(struct file *file, struct dentry *dentry, int datasync) +int nilfs_sync_file(struct file *file, int datasync) { /* * Called from fsync() system call @@ -37,7 +37,7 @@ int nilfs_sync_file(struct file *file, struct dentry *dentry, int datasync) * This function should be implemented when the writeback function * will be implemented. */ - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; int err; if (!nilfs_inode_dirty(inode)) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 8723e5bfd071..47d6d7928122 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -228,7 +228,7 @@ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, struct page *, struct inode *); /* file.c */ -extern int nilfs_sync_file(struct file *, struct dentry *, int); +extern int nilfs_sync_file(struct file *, int); /* ioctl.c */ long nilfs_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index fe44d3feee4a..0f48e7c5d9e1 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1527,10 +1527,9 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp) * this problem for now. We do write the $BITMAP attribute if it is present * which is the important one for a directory so things are not too bad. */ -static int ntfs_dir_fsync(struct file *filp, struct dentry *dentry, - int datasync) +static int ntfs_dir_fsync(struct file *filp, int datasync) { - struct inode *bmp_vi, *vi = dentry->d_inode; + struct inode *bmp_vi, *vi = filp->f_mapping->host; int err, ret; ntfs_attr na; diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index a1924a0d2ab0..113ebd9f25a4 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2133,7 +2133,6 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, /** * ntfs_file_fsync - sync a file to disk * @filp: file to be synced - * @dentry: dentry describing the file to sync * @datasync: if non-zero only flush user data and not metadata * * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync @@ -2149,19 +2148,15 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, * Also, if @datasync is true, we do not wait on the inode to be written out * but we always wait on the page cache pages to be written out. * - * Note: In the past @filp could be NULL so we ignore it as we don't need it - * anyway. - * * Locking: Caller must hold i_mutex on the inode. * * TODO: We should probably also write all attribute/index inodes associated * with this inode but since we have no simple way of getting to them we ignore * this problem for now. 
*/ -static int ntfs_file_fsync(struct file *filp, struct dentry *dentry, - int datasync) +static int ntfs_file_fsync(struct file *filp, int datasync) { - struct inode *vi = dentry->d_inode; + struct inode *vi = filp->f_mapping->host; int err, ret = 0; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 97e54b9e654b..6a13ea64c447 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -175,13 +175,12 @@ static int ocfs2_dir_release(struct inode *inode, struct file *file) return 0; } -static int ocfs2_sync_file(struct file *file, - struct dentry *dentry, - int datasync) +static int ocfs2_sync_file(struct file *file, int datasync) { int err = 0; journal_t *journal; - struct inode *inode = dentry->d_inode; + struct dentry *dentry = file->f_path.dentry; + struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync, @@ -1053,7 +1052,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) } /* - * This will intentionally not wind up calling vmtruncate(), + * This will intentionally not wind up calling simple_setsize(), * since all the work for a size change has been done above. * Otherwise, we could get into problems with truncate as * ip_alloc_sem is used there to protect against i_size @@ -2119,9 +2118,13 @@ relock: * direct write may have instantiated a few * blocks outside i_size. Trim these off again. * Don't need i_size_read because we hold i_mutex. + * + * XXX(hch): this looks buggy because ocfs2 did not + * actually implement ->truncate. Take a look at + * the new truncate sequence and update this accordingly */ if (*ppos + count > inode->i_size) - vmtruncate(inode, inode->i_size); + simple_setsize(inode, inode->i_size); ret = written; goto out_dio; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 2c26ce251cb3..0eaa929a4dbf 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -879,13 +879,15 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend) if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) continue; if (unsuspend) - status = vfs_quota_enable( - sb_dqopt(sb)->files[type], - type, QFMT_OCFS2, - DQUOT_SUSPENDED); - else - status = vfs_quota_disable(sb, type, - DQUOT_SUSPENDED); + status = dquot_resume(sb, type); + else { + struct ocfs2_mem_dqinfo *oinfo; + + /* Cancel periodic syncing before suspending */ + oinfo = sb_dqinfo(sb, type)->dqi_priv; + cancel_delayed_work_sync(&oinfo->dqi_sync_work); + status = dquot_suspend(sb, type); + } if (status < 0) break; } @@ -916,8 +918,8 @@ static int ocfs2_enable_quotas(struct ocfs2_super *osb) status = -ENOENT; goto out_quota_off; } - status = vfs_quota_enable(inode[type], type, QFMT_OCFS2, - DQUOT_USAGE_ENABLED); + status = dquot_enable(inode[type], type, QFMT_OCFS2, + DQUOT_USAGE_ENABLED); if (status < 0) goto out_quota_off; } @@ -952,8 +954,8 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) /* Turn off quotas. 
This will remove all dquot structures from * memory and so they will be automatically synced to global * quota files */ - vfs_quota_disable(sb, type, DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED); + dquot_disable(sb, type, DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED); if (!inode) continue; iput(inode); @@ -962,7 +964,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) /* Handle quota on quotactl */ static int ocfs2_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount) + char *path) { unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; @@ -970,30 +972,24 @@ static int ocfs2_quota_on(struct super_block *sb, int type, int format_id, if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, feature[type])) return -EINVAL; - if (remount) - return 0; /* Just ignore it has been handled in - * ocfs2_remount() */ - return vfs_quota_enable(sb_dqopt(sb)->files[type], type, - format_id, DQUOT_LIMITS_ENABLED); + return dquot_enable(sb_dqopt(sb)->files[type], type, + format_id, DQUOT_LIMITS_ENABLED); } /* Handle quota off quotactl */ -static int ocfs2_quota_off(struct super_block *sb, int type, int remount) +static int ocfs2_quota_off(struct super_block *sb, int type) { - if (remount) - return 0; /* Ignore now and handle later in - * ocfs2_remount() */ - return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED); + return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); } static const struct quotactl_ops ocfs2_quotactl_ops = { .quota_on = ocfs2_quota_on, .quota_off = ocfs2_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk, + .quota_sync = dquot_quota_sync, + .get_info = dquot_get_dqinfo, + .set_info = dquot_set_dqinfo, + .get_dqblk = dquot_get_dqblk, + .set_dqblk = dquot_set_dqblk, }; static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/omfs/file.c b/fs/omfs/file.c index 399487c09364..6e7a3291bbe8 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -329,7 +329,7 @@ const struct file_operations omfs_file_operations = { .aio_read = generic_file_aio_read, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, }; diff --git a/fs/pipe.c b/fs/pipe.c index d79872eba09a..db6eaaba0dd8 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -230,6 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe, return kmap(buf->page); } +EXPORT_SYMBOL(generic_pipe_buf_map); /** * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer @@ -249,6 +250,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe, } else kunmap(buf->page); } +EXPORT_SYMBOL(generic_pipe_buf_unmap); /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer @@ -279,6 +281,7 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe, return 1; } +EXPORT_SYMBOL(generic_pipe_buf_steal); /** * generic_pipe_buf_get - get a reference to a &struct pipe_buffer @@ -294,6 +297,7 @@ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { page_cache_get(buf->page); } +EXPORT_SYMBOL(generic_pipe_buf_get); /** * generic_pipe_buf_confirm - verify contents of the pipe buffer @@ -309,6 +313,7 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info, { return 0; } +EXPORT_SYMBOL(generic_pipe_buf_confirm); /** * generic_pipe_buf_release - put a reference to a &struct pipe_buffer @@ -323,6 
+328,7 @@ void generic_pipe_buf_release(struct pipe_inode_info *pipe, { page_cache_release(buf->page); } +EXPORT_SYMBOL(generic_pipe_buf_release); static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, @@ -1169,14 +1175,18 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) switch (cmd) { case F_SETPIPE_SZ: - if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) - return -EINVAL; + if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) { + ret = -EINVAL; + goto out; + } /* * The pipe needs to be at least 2 pages large to * guarantee POSIX behaviour. */ - if (arg < 2) - return -EINVAL; + if (arg < 2) { + ret = -EINVAL; + goto out; + } ret = pipe_set_size(pipe, arg); break; case F_GETPIPE_SZ: @@ -1187,6 +1197,7 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) break; } +out: mutex_unlock(&pipe->inode->i_mutex); return ret; } diff --git a/fs/proc/array.c b/fs/proc/array.c index 885ab5513ac5..9b58d38bc911 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -267,7 +267,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p) shpending = p->signal->shared_pending.signal; blocked = p->blocked; collect_sigign_sigcatch(p, &ignored, &caught); - num_threads = atomic_read(&p->signal->count); + num_threads = get_nr_threads(p); rcu_read_lock(); /* FIXME: is this correct? */ qsize = atomic_read(&__task_cred(p)->user->sigpending); rcu_read_unlock(); @@ -410,7 +410,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, tty_nr = new_encode_dev(tty_devnum(sig->tty)); } - num_threads = atomic_read(&sig->count); + num_threads = get_nr_threads(task); collect_sigign_sigcatch(task, &sigign, &sigcatch); cmin_flt = sig->cmin_flt; diff --git a/fs/proc/base.c b/fs/proc/base.c index c7f9f23449dc..acb7ef80ea4f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -166,18 +166,6 @@ static int get_fs_path(struct task_struct *task, struct path *path, bool root) return result; } -static int get_nr_threads(struct task_struct *tsk) -{ - unsigned long flags; - int count = 0; - - if (lock_task_sighand(tsk, &flags)) { - count = atomic_read(&tsk->signal->count); - unlock_task_sighand(tsk, &flags); - } - return count; -} - static int proc_cwd_link(struct inode *inode, struct path *path) { struct task_struct *task = get_proc_task(inode); @@ -2444,7 +2432,7 @@ static struct dentry *proc_base_instantiate(struct inode *dir, const struct pid_entry *p = ptr; struct inode *inode; struct proc_inode *ei; - struct dentry *error = ERR_PTR(-EINVAL); + struct dentry *error; /* Allocate the inode */ error = ERR_PTR(-ENOMEM); @@ -2794,7 +2782,7 @@ out: struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) { - struct dentry *result = ERR_PTR(-ENOENT); + struct dentry *result; struct task_struct *task; unsigned tgid; struct pid_namespace *ns; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 43c127490606..2791907744ed 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -343,21 +343,6 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */ /* * Return an inode number between PROC_DYNAMIC_FIRST and * 0xffffffff, or zero on failure. 
- * - * Current inode allocations in the proc-fs (hex-numbers): - * - * 00000000 reserved - * 00000001-00000fff static entries (goners) - * 001 root-ino - * - * 00001000-00001fff unused - * 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff - * 80000000-efffffff unused - * f0000000-ffffffff dynamic entries - * - * Goal: - * Once we split the thing into several virtual filesystems, - * we will get rid of magical ranges (and this comment, BTW). */ static unsigned int get_inode_number(void) { diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index c837a77351be..6f37c391468d 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -588,7 +588,7 @@ static struct kcore_list kcore_text; */ static void __init proc_kcore_text_init(void) { - kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT); + kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT); } #else static void __init proc_kcore_text_init(void) diff --git a/fs/proc/root.c b/fs/proc/root.c index 757c069f2a65..4258384ed22d 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -110,7 +110,6 @@ void __init proc_root_init(void) if (err) return; proc_mnt = kern_mount_data(&proc_fs_type, &init_pid_ns); - err = PTR_ERR(proc_mnt); if (IS_ERR(proc_mnt)) { unregister_filesystem(&proc_fs_type); return; diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index 6f30c3d5bcbf..6e8fc62b40a8 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -77,9 +77,10 @@ out: const struct file_operations qnx4_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = qnx4_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, }; const struct inode_operations qnx4_dir_inode_operations = diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 655a4c52b8c3..12c233da1b6b 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -228,10 +228,6 @@ static struct hlist_head *dquot_hash; struct dqstats dqstats; EXPORT_SYMBOL(dqstats); -#ifdef CONFIG_SMP -struct dqstats *dqstats_pcpu; -EXPORT_SYMBOL(dqstats_pcpu); -#endif static qsize_t inode_get_rsv_space(struct inode *inode); static void __dquot_initialize(struct inode *inode, int type); @@ -584,7 +580,7 @@ out: } EXPORT_SYMBOL(dquot_scan_active); -int vfs_quota_sync(struct super_block *sb, int type, int wait) +int dquot_quota_sync(struct super_block *sb, int type, int wait) { struct list_head *dirty; struct dquot *dquot; @@ -656,7 +652,7 @@ int vfs_quota_sync(struct super_block *sb, int type, int wait) return 0; } -EXPORT_SYMBOL(vfs_quota_sync); +EXPORT_SYMBOL(dquot_quota_sync); /* Free unused dquots from cache */ static void prune_dqcache(int count) @@ -676,27 +672,10 @@ static void prune_dqcache(int count) } } -static int dqstats_read(unsigned int type) -{ - int count = 0; -#ifdef CONFIG_SMP - int cpu; - for_each_possible_cpu(cpu) - count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type]; - /* Statistics reading is racy, but absolute accuracy isn't required */ - if (count < 0) - count = 0; -#else - count = dqstats.stat[type]; -#endif - return count; -} - /* * This is called from kswapd when we think we need some * more memory */ - static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) { if (nr) { @@ -704,7 +683,9 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) prune_dqcache(nr); spin_unlock(&dq_list_lock); } - return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure; + return ((unsigned) + percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]) + /100) * sysctl_vfs_cache_pressure; } static struct shrinker dqcache_shrinker = { @@ -1514,11 +1495,13 @@ static void 
inode_decr_space(struct inode *inode, qsize_t number, int reserve) /* * This operation can block, but only after everything is updated */ -int __dquot_alloc_space(struct inode *inode, qsize_t number, - int warn, int reserve) +int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) { int cnt, ret = 0; char warntype[MAXQUOTAS]; + int warn = flags & DQUOT_SPACE_WARN; + int reserve = flags & DQUOT_SPACE_RESERVE; + int nofail = flags & DQUOT_SPACE_NOFAIL; /* * First test before acquiring mutex - solves deadlocks when we @@ -1539,7 +1522,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, continue; ret = check_bdq(inode->i_dquot[cnt], number, !warn, warntype+cnt); - if (ret) { + if (ret && !nofail) { spin_unlock(&dq_data_lock); goto out_flush_warn; } @@ -1638,10 +1621,11 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty); /* * This operation can block, but only after everything is updated */ -void __dquot_free_space(struct inode *inode, qsize_t number, int reserve) +void __dquot_free_space(struct inode *inode, qsize_t number, int flags) { unsigned int cnt; char warntype[MAXQUOTAS]; + int reserve = flags & DQUOT_SPACE_RESERVE; /* First test before acquiring mutex - solves deadlocks when we * re-enter the quota code and are already holding the mutex */ @@ -1812,7 +1796,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) transfer_to[USRQUOTA] = dqget(sb, iattr->ia_uid, USRQUOTA); if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) - transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_uid, GRPQUOTA); + transfer_to[GRPQUOTA] = dqget(sb, iattr->ia_gid, GRPQUOTA); ret = __dquot_transfer(inode, transfer_to); dqput_all(transfer_to); @@ -1847,6 +1831,7 @@ const struct dquot_operations dquot_operations = { .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, }; +EXPORT_SYMBOL(dquot_operations); /* * Generic helper for ->open on filesystems supporting disk quotas. @@ -1865,7 +1850,7 @@ EXPORT_SYMBOL(dquot_file_open); /* * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) */ -int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags) +int dquot_disable(struct super_block *sb, int type, unsigned int flags) { int cnt, ret = 0; struct quota_info *dqopt = sb_dqopt(sb); @@ -1995,14 +1980,15 @@ put_inodes: } return ret; } -EXPORT_SYMBOL(vfs_quota_disable); +EXPORT_SYMBOL(dquot_disable); -int vfs_quota_off(struct super_block *sb, int type, int remount) +int dquot_quota_off(struct super_block *sb, int type) { - return vfs_quota_disable(sb, type, remount ? 
DQUOT_SUSPENDED : - (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED)); + return dquot_disable(sb, type, + DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); } -EXPORT_SYMBOL(vfs_quota_off); +EXPORT_SYMBOL(dquot_quota_off); + /* * Turn quotas on on a device */ @@ -2120,36 +2106,43 @@ out_fmt: } /* Reenable quotas on remount RW */ -static int vfs_quota_on_remount(struct super_block *sb, int type) +int dquot_resume(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); struct inode *inode; - int ret; + int ret = 0, cnt; unsigned int flags; - mutex_lock(&dqopt->dqonoff_mutex); - if (!sb_has_quota_suspended(sb, type)) { + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { + if (type != -1 && cnt != type) + continue; + + mutex_lock(&dqopt->dqonoff_mutex); + if (!sb_has_quota_suspended(sb, cnt)) { + mutex_unlock(&dqopt->dqonoff_mutex); + continue; + } + inode = dqopt->files[cnt]; + dqopt->files[cnt] = NULL; + spin_lock(&dq_state_lock); + flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | + DQUOT_LIMITS_ENABLED, + cnt); + dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt); + spin_unlock(&dq_state_lock); mutex_unlock(&dqopt->dqonoff_mutex); - return 0; - } - inode = dqopt->files[type]; - dqopt->files[type] = NULL; - spin_lock(&dq_state_lock); - flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | - DQUOT_LIMITS_ENABLED, type); - dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type); - spin_unlock(&dq_state_lock); - mutex_unlock(&dqopt->dqonoff_mutex); - flags = dquot_generic_flag(flags, type); - ret = vfs_load_quota_inode(inode, type, dqopt->info[type].dqi_fmt_id, - flags); - iput(inode); + flags = dquot_generic_flag(flags, cnt); + ret = vfs_load_quota_inode(inode, cnt, + dqopt->info[cnt].dqi_fmt_id, flags); + iput(inode); + } return ret; } +EXPORT_SYMBOL(dquot_resume); -int vfs_quota_on_path(struct super_block *sb, int type, int format_id, +int dquot_quota_on_path(struct super_block *sb, int type, int format_id, struct path *path) { int error = security_quota_on(path->dentry); @@ -2164,40 +2157,36 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id, DQUOT_LIMITS_ENABLED); return error; } -EXPORT_SYMBOL(vfs_quota_on_path); +EXPORT_SYMBOL(dquot_quota_on_path); -int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name, - int remount) +int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name) { struct path path; int error; - if (remount) - return vfs_quota_on_remount(sb, type); - error = kern_path(name, LOOKUP_FOLLOW, &path); if (!error) { - error = vfs_quota_on_path(sb, type, format_id, &path); + error = dquot_quota_on_path(sb, type, format_id, &path); path_put(&path); } return error; } -EXPORT_SYMBOL(vfs_quota_on); +EXPORT_SYMBOL(dquot_quota_on); /* * More powerful function for turning on quotas allowing setting * of individual quota flags */ -int vfs_quota_enable(struct inode *inode, int type, int format_id, - unsigned int flags) +int dquot_enable(struct inode *inode, int type, int format_id, + unsigned int flags) { int ret = 0; struct super_block *sb = inode->i_sb; struct quota_info *dqopt = sb_dqopt(sb); /* Just unsuspend quotas? */ - if (flags & DQUOT_SUSPENDED) - return vfs_quota_on_remount(sb, type); + BUG_ON(flags & DQUOT_SUSPENDED); + if (!flags) return 0; /* Just updating flags needed? 
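
The hunks above rename the vfs_quota_* entry points to dquot_* and export both dquot_operations and dquot_quotactl_ops, so a filesystem that wants plain VFS-managed quota can point its super_block at the generic tables and drive suspend/resume from its remount path, much as reiserfs does further down in this series. The fragment below is only an illustrative sketch: examplefs_fill_super() and examplefs_remount() are hypothetical names and error handling is cut to the minimum.

	#include <linux/fs.h>
	#include <linux/quotaops.h>

	/* hypothetical fill_super fragment: wire in the generic dquot tables */
	static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
	{
		sb->dq_op = &dquot_operations;		/* exported by this series */
		sb->s_qcop = &dquot_quotactl_ops;	/* exported by this series */
		/*
		 * A real filesystem also provides ->quota_read/->quota_write
		 * in its super_operations so the quota files can be accessed.
		 */
		return 0;
	}

	/* hypothetical remount fragment: suspend quota when going read-only,
	 * resume it when going back read-write */
	static int examplefs_remount(struct super_block *sb, int *flags, char *data)
	{
		int err;

		if (*flags & MS_RDONLY) {
			err = dquot_suspend(sb, -1);
			if (err < 0)
				return err;
		} else {
			dquot_resume(sb, -1);
		}
		return 0;
	}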
*/ @@ -2229,13 +2218,13 @@ out_lock: load_quota: return vfs_load_quota_inode(inode, type, format_id, flags); } -EXPORT_SYMBOL(vfs_quota_enable); +EXPORT_SYMBOL(dquot_enable); /* * This function is used when filesystem needs to initialize quotas * during mount time. */ -int vfs_quota_on_mount(struct super_block *sb, char *qf_name, +int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type) { struct dentry *dentry; @@ -2261,24 +2250,7 @@ out: dput(dentry); return error; } -EXPORT_SYMBOL(vfs_quota_on_mount); - -/* Wrapper to turn on quotas when remounting rw */ -int vfs_dq_quota_on_remount(struct super_block *sb) -{ - int cnt; - int ret = 0, err; - - if (!sb->s_qcop || !sb->s_qcop->quota_on) - return -ENOSYS; - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1); - if (err < 0 && !ret) - ret = err; - } - return ret; -} -EXPORT_SYMBOL(vfs_dq_quota_on_remount); +EXPORT_SYMBOL(dquot_quota_on_mount); static inline qsize_t qbtos(qsize_t blocks) { @@ -2313,8 +2285,8 @@ static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di) spin_unlock(&dq_data_lock); } -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, - struct fs_disk_quota *di) +int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, + struct fs_disk_quota *di) { struct dquot *dquot; @@ -2326,7 +2298,7 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, return 0; } -EXPORT_SYMBOL(vfs_get_dqblk); +EXPORT_SYMBOL(dquot_get_dqblk); #define VFS_FS_DQ_MASK \ (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \ @@ -2425,7 +2397,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di) return 0; } -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, +int dquot_set_dqblk(struct super_block *sb, int type, qid_t id, struct fs_disk_quota *di) { struct dquot *dquot; @@ -2441,10 +2413,10 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, out: return rc; } -EXPORT_SYMBOL(vfs_set_dqblk); +EXPORT_SYMBOL(dquot_set_dqblk); /* Generic routine for getting common part of quota file information */ -int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; @@ -2463,10 +2435,10 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return 0; } -EXPORT_SYMBOL(vfs_get_dqinfo); +EXPORT_SYMBOL(dquot_get_dqinfo); /* Generic routine for setting common part of quota file information */ -int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) +int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii) { struct mem_dqinfo *mi; int err = 0; @@ -2493,27 +2465,27 @@ out: mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); return err; } -EXPORT_SYMBOL(vfs_set_dqinfo); +EXPORT_SYMBOL(dquot_set_dqinfo); -const struct quotactl_ops vfs_quotactl_ops = { - .quota_on = vfs_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk +const struct quotactl_ops dquot_quotactl_ops = { + .quota_on = dquot_quota_on, + .quota_off = dquot_quota_off, + .quota_sync = dquot_quota_sync, + .get_info = dquot_get_dqinfo, + .set_info = dquot_set_dqinfo, + .get_dqblk = dquot_get_dqblk, + .set_dqblk = dquot_set_dqblk }; - +EXPORT_SYMBOL(dquot_quotactl_ops); static int do_proc_dqstats(struct ctl_table *table, int 
write, void __user *buffer, size_t *lenp, loff_t *ppos) { -#ifdef CONFIG_SMP - /* Update global table */ unsigned int type = (int *)table->data - dqstats.stat; - dqstats.stat[type] = dqstats_read(type); -#endif + + /* Update global table */ + dqstats.stat[type] = + percpu_counter_sum_positive(&dqstats.counter[type]); return proc_dointvec(table, write, buffer, lenp, ppos); } @@ -2606,7 +2578,7 @@ static ctl_table sys_table[] = { static int __init dquot_init(void) { - int i; + int i, ret; unsigned long nr_hash, order; printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); @@ -2624,12 +2596,11 @@ static int __init dquot_init(void) if (!dquot_hash) panic("Cannot create dquot hash table"); -#ifdef CONFIG_SMP - dqstats_pcpu = alloc_percpu(struct dqstats); - if (!dqstats_pcpu) - panic("Cannot create dquot stats table"); -#endif - memset(&dqstats, 0, sizeof(struct dqstats)); + for (i = 0; i < _DQST_DQSTAT_LAST; i++) { + ret = percpu_counter_init(&dqstats.counter[i], 0); + if (ret) + panic("Cannot create dquot stat counters"); + } /* Find power-of-two hlist_heads which can fit into allocation */ nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); diff --git a/fs/quota/quota.c b/fs/quota/quota.c index ce3dfd066f59..b299961e1edb 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -73,7 +73,7 @@ static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id, if (IS_ERR(pathname)) return PTR_ERR(pathname); if (sb->s_qcop->quota_on) - ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); + ret = sb->s_qcop->quota_on(sb, type, id, pathname); putname(pathname); return ret; } @@ -260,7 +260,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, case Q_QUOTAOFF: if (!sb->s_qcop->quota_off) return -ENOSYS; - return sb->s_qcop->quota_off(sb, type, 0); + return sb->s_qcop->quota_off(sb, type); case Q_GETFMT: return quota_getfmt(sb, type, addr); case Q_GETINFO: diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index 78f613cb9c76..4884ac5ae9be 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -43,12 +43,13 @@ const struct file_operations ramfs_file_operations = { .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, - .fsync = simple_sync_file, + .fsync = noop_fsync, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .llseek = generic_file_llseek, }; const struct inode_operations ramfs_file_inode_operations = { + .setattr = simple_setattr, .getattr = simple_getattr, }; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 5ea4ad81a429..d532c20fc179 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -42,7 +42,7 @@ const struct file_operations ramfs_file_operations = { .aio_read = generic_file_aio_read, .write = do_sync_write, .aio_write = generic_file_aio_write, - .fsync = simple_sync_file, + .fsync = noop_fsync, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .llseek = generic_file_llseek, @@ -146,7 +146,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) return ret; } - ret = vmtruncate(inode, newsize); + ret = simple_setsize(inode, newsize); return ret; } @@ -169,7 +169,8 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) /* pick out size-changing events */ if (ia->ia_valid & ATTR_SIZE) { - loff_t size = i_size_read(inode); + loff_t size = inode->i_size; + if (ia->ia_size != size) { ret = ramfs_nommu_resize(inode, ia->ia_size, size); if (ret < 
0 || ia->ia_valid == ATTR_SIZE) @@ -182,7 +183,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia) } } - ret = inode_setattr(inode, ia); + generic_setattr(inode, ia); out: ia->ia_valid = old_ia_valid; return ret; diff --git a/fs/read_write.c b/fs/read_write.c index 113386d6fd2d..9c0485236e68 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -97,6 +97,23 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) } EXPORT_SYMBOL(generic_file_llseek); +/** + * noop_llseek - No Operation Performed llseek implementation + * @file: file structure to seek on + * @offset: file offset to seek to + * @origin: type of seek + * + * This is an implementation of ->llseek useable for the rare special case when + * userspace expects the seek to succeed but the (device) file is actually not + * able to perform the seek. In this case you use noop_llseek() instead of + * falling back to the default implementation of ->llseek. + */ +loff_t noop_llseek(struct file *file, loff_t offset, int origin) +{ + return file->f_pos; +} +EXPORT_SYMBOL(noop_llseek); + loff_t no_llseek(struct file *file, loff_t offset, int origin) { return -ESPIPE; diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 07930449a958..198dabf1b2bb 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -14,10 +14,10 @@ extern const struct reiserfs_key MIN_KEY; static int reiserfs_readdir(struct file *, void *, filldir_t); -static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, - int datasync); +static int reiserfs_dir_fsync(struct file *filp, int datasync); const struct file_operations reiserfs_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = reiserfs_readdir, .fsync = reiserfs_dir_fsync, @@ -27,10 +27,9 @@ const struct file_operations reiserfs_dir_operations = { #endif }; -static int reiserfs_dir_fsync(struct file *filp, struct dentry *dentry, - int datasync) +static int reiserfs_dir_fsync(struct file *filp, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; int err; reiserfs_write_lock(inode->i_sb); err = reiserfs_commit_for_inode(inode); diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 9977df9f3a54..b82cdd8a45dd 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -134,10 +134,9 @@ static void reiserfs_vfs_truncate_file(struct inode *inode) * be removed... 
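
The new noop_llseek() helper documented above is meant for drivers whose device cannot actually seek but whose userspace still expects lseek() to succeed; it simply returns the current file position instead of -ESPIPE. A minimal, hypothetical user could look like the sketch below (foo_read() and foo_fops are made-up names; only noop_llseek comes from this series).

	#include <linux/fs.h>
	#include <linux/module.h>

	/* hypothetical read method for a stream-like device */
	static ssize_t foo_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
	{
		return 0;	/* always report EOF in this sketch */
	}

	static const struct file_operations foo_fops = {
		.owner	= THIS_MODULE,
		.read	= foo_read,
		.llseek	= noop_llseek,	/* "succeeds" without moving f_pos */
	};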
*/ -static int reiserfs_sync_file(struct file *filp, - struct dentry *dentry, int datasync) +static int reiserfs_sync_file(struct file *filp, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; int err; int barrier_done; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 59125fb36d42..9822fa15118b 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -158,6 +158,7 @@ static int finish_unfinished(struct super_block *s) #ifdef CONFIG_QUOTA int i; int ms_active_set; + int quota_enabled[MAXQUOTAS]; #endif /* compose key to look for "save" links */ @@ -179,8 +180,15 @@ static int finish_unfinished(struct super_block *s) } /* Turn on quotas so that they are updated correctly */ for (i = 0; i < MAXQUOTAS; i++) { + quota_enabled[i] = 1; if (REISERFS_SB(s)->s_qf_names[i]) { - int ret = reiserfs_quota_on_mount(s, i); + int ret; + + if (sb_has_quota_active(s, i)) { + quota_enabled[i] = 0; + continue; + } + ret = reiserfs_quota_on_mount(s, i); if (ret < 0) reiserfs_warning(s, "reiserfs-2500", "cannot turn on journaled " @@ -304,8 +312,8 @@ static int finish_unfinished(struct super_block *s) #ifdef CONFIG_QUOTA /* Turn quotas off */ for (i = 0; i < MAXQUOTAS; i++) { - if (sb_dqopt(s)->files[i]) - vfs_quota_off(s, i, 0); + if (sb_dqopt(s)->files[i] && quota_enabled[i]) + dquot_quota_off(s, i); } if (ms_active_set) /* Restore the flag back */ @@ -466,6 +474,8 @@ static void reiserfs_put_super(struct super_block *s) struct reiserfs_transaction_handle th; th.t_trans_id = 0; + dquot_disable(s, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); + reiserfs_write_lock(s); if (s->s_dirt) @@ -620,7 +630,7 @@ static int reiserfs_acquire_dquot(struct dquot *); static int reiserfs_release_dquot(struct dquot *); static int reiserfs_mark_dquot_dirty(struct dquot *); static int reiserfs_write_info(struct super_block *, int); -static int reiserfs_quota_on(struct super_block *, int, int, char *, int); +static int reiserfs_quota_on(struct super_block *, int, int, char *); static const struct dquot_operations reiserfs_quota_operations = { .write_dquot = reiserfs_write_dquot, @@ -634,12 +644,12 @@ static const struct dquot_operations reiserfs_quota_operations = { static const struct quotactl_ops reiserfs_qctl_operations = { .quota_on = reiserfs_quota_on, - .quota_off = vfs_quota_off, - .quota_sync = vfs_quota_sync, - .get_info = vfs_get_dqinfo, - .set_info = vfs_set_dqinfo, - .get_dqblk = vfs_get_dqblk, - .set_dqblk = vfs_set_dqblk, + .quota_off = dquot_quota_off, + .quota_sync = dquot_quota_sync, + .get_info = dquot_get_dqinfo, + .set_info = dquot_set_dqinfo, + .get_dqblk = dquot_get_dqblk, + .set_dqblk = dquot_set_dqblk, }; #endif @@ -1242,6 +1252,11 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) if (s->s_flags & MS_RDONLY) /* it is read-only already */ goto out_ok; + + err = dquot_suspend(s, -1); + if (err < 0) + goto out_err; + /* try to remount file system with read-only permissions */ if (sb_umount_state(rs) == REISERFS_VALID_FS || REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) { @@ -1295,6 +1310,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) s->s_dirt = 0; if (!(*mount_flags & MS_RDONLY)) { + dquot_resume(s, -1); finish_unfinished(s); reiserfs_xattr_init(s, *mount_flags); } @@ -2022,15 +2038,15 @@ static int reiserfs_write_info(struct super_block *sb, int type) */ static int reiserfs_quota_on_mount(struct super_block *sb, int type) { - return vfs_quota_on_mount(sb, 
REISERFS_SB(sb)->s_qf_names[type], - REISERFS_SB(sb)->s_jquota_fmt, type); + return dquot_quota_on_mount(sb, REISERFS_SB(sb)->s_qf_names[type], + REISERFS_SB(sb)->s_jquota_fmt, type); } /* * Standard function to be called on quota_on */ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, - char *name, int remount) + char *name) { int err; struct path path; @@ -2039,9 +2055,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) return -EINVAL; - /* No more checks needed? Path and format_id are bogus anyway... */ - if (remount) - return vfs_quota_on(sb, type, format_id, name, 1); + err = kern_path(name, LOOKUP_FOLLOW, &path); if (err) return err; @@ -2085,7 +2099,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (err) goto out; } - err = vfs_quota_on_path(sb, type, format_id, &path); + err = dquot_quota_on_path(sb, type, format_id, &path); out: path_put(&path); return err; diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 6c978428892d..00a70cab1f36 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c @@ -37,6 +37,7 @@ static int smb_link(struct dentry *, struct inode *, struct dentry *); const struct file_operations smb_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = smb_readdir, .unlocked_ioctl = smb_ioctl, diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c index 84ecf0e43f91..8e187a0f94bb 100644 --- a/fs/smbfs/file.c +++ b/fs/smbfs/file.c @@ -28,8 +28,9 @@ #include "proto.h" static int -smb_fsync(struct file *file, struct dentry * dentry, int datasync) +smb_fsync(struct file *file, int datasync) { + struct dentry *dentry = file->f_path.dentry; struct smb_sb_info *server = server_from_dentry(dentry); int result; diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index dfa1d67f8fca..9551cb6f7fe4 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -714,7 +714,7 @@ smb_notify_change(struct dentry *dentry, struct iattr *attr) error = server->ops->truncate(inode, attr->ia_size); if (error) goto out; - error = vmtruncate(inode, attr->ia_size); + error = simple_setsize(inode, attr->ia_size); if (error) goto out; refresh = 1; diff --git a/fs/super.c b/fs/super.c index 69688b15f1fa..5c35bc7a499e 100644 --- a/fs/super.c +++ b/fs/super.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include /* for the emergency remount stuff */ @@ -94,8 +93,6 @@ static struct super_block *alloc_super(struct file_system_type *type) init_rwsem(&s->s_dquot.dqptr_sem); init_waitqueue_head(&s->s_wait_unfrozen); s->s_maxbytes = MAX_NON_LFS; - s->dq_op = sb_dquot_ops; - s->s_qcop = sb_quotactl_ops; s->s_op = &default_op; s->s_time_gran = 1000000000; } @@ -160,7 +157,6 @@ void deactivate_locked_super(struct super_block *s) { struct file_system_type *fs = s->s_type; if (atomic_dec_and_test(&s->s_active)) { - vfs_dq_off(s, 0); fs->kill_sb(s); put_filesystem(fs); put_super(s); @@ -524,7 +520,7 @@ rescan: int do_remount_sb(struct super_block *sb, int flags, void *data, int force) { int retval; - int remount_rw, remount_ro; + int remount_ro; if (sb->s_frozen != SB_UNFROZEN) return -EBUSY; @@ -540,7 +536,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) sync_filesystem(sb); remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); - remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY); /* If we are remounting RDONLY and current sb is read/write, make sure there are no rw 
files opened */ @@ -549,9 +544,6 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) mark_files_ro(sb); else if (!fs_may_remount_ro(sb)) return -EBUSY; - retval = vfs_dq_off(sb, 1); - if (retval < 0 && retval != -ENOSYS) - return -EBUSY; } if (sb->s_op->remount_fs) { @@ -560,8 +552,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) return retval; } sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK); - if (remount_rw) - vfs_dq_quota_on_remount(sb); + /* * Some filesystems modify their metadata via some other path than the * bdev buffer cache (eg. use a private mapping, or directories in @@ -946,8 +937,8 @@ out: EXPORT_SYMBOL_GPL(vfs_kern_mount); /** - * freeze_super -- lock the filesystem and force it into a consistent state - * @super: the super to lock + * freeze_super - lock the filesystem and force it into a consistent state + * @sb: the super to lock * * Syncs the super to make sure the filesystem is consistent and calls the fs's * freeze_fs. Subsequent calls to this without first thawing the fs will return diff --git a/fs/sync.c b/fs/sync.c index e8cbd415e50a..c9f83f480ec5 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -130,12 +130,10 @@ void emergency_sync(void) /* * Generic function to fsync a file. - * - * filp may be NULL if called via the msync of a vma. */ -int file_fsync(struct file *filp, struct dentry *dentry, int datasync) +int file_fsync(struct file *filp, int datasync) { - struct inode * inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; struct super_block * sb; int ret, err; @@ -183,7 +181,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) * livelocks in fsync_buffers_list(). */ mutex_lock(&mapping->host->i_mutex); - err = file->f_op->fsync(file, file->f_path.dentry, datasync); + err = file->f_op->fsync(file, datasync); if (!ret) ret = err; mutex_unlock(&mapping->host->i_mutex); diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index bbd77e95cf7f..bde1a4c3679a 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -117,13 +117,11 @@ int sysfs_setattr(struct dentry *dentry, struct iattr *iattr) if (error) goto out; - iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */ - - error = inode_setattr(inode, iattr); - if (error) - goto out; + /* this ignores size changes */ + generic_setattr(inode, iattr); error = sysfs_sd_setattr(sd, iattr); + out: mutex_unlock(&sysfs_mutex); return error; diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 1dabed286b4c..79941e4964a4 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -24,7 +24,7 @@ const struct file_operations sysv_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = sysv_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, }; static inline void dir_put_page(struct page *page) diff --git a/fs/sysv/file.c b/fs/sysv/file.c index 96340c01f4a7..750cc22349bd 100644 --- a/fs/sysv/file.c +++ b/fs/sysv/file.c @@ -26,7 +26,7 @@ const struct file_operations sysv_file_operations = { .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, }; diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 4573734d723d..d4a5380b5669 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -43,6 +43,7 @@ static int sysv_sync_fs(struct super_block *sb, int wait) * then attach current time stamp. * But if the filesystem was marked clean, keep it clean. 
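
As the fs/sync.c and reiserfs hunks in this series show, ->fsync loses its dentry argument and implementations now take the inode from file->f_mapping->host. A filesystem-private fsync with the new prototype then reduces to roughly the following sketch (examplefs_sync_file() is a hypothetical name; data pages have already been written back by vfs_fsync_range() before the method is called).

	#include <linux/fs.h>

	/* hypothetical ->fsync with the new (file, datasync) prototype */
	static int examplefs_sync_file(struct file *file, int datasync)
	{
		/* the inode now comes from the file, not from a dentry */
		struct inode *inode = file->f_mapping->host;

		/* for datasync, skip work if no integrity-critical metadata is dirty */
		if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
			return 0;

		/* fall back to the generic buffer + inode metadata writeback */
		return generic_file_fsync(file, datasync);
	}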
*/ + sb->s_dirt = 0; old_time = fs32_to_cpu(sbi, *sbi->s_sb_time); if (sbi->s_type == FSTYPE_SYSV4) { if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time)) diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5692cf72b807..12f445cee9f7 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -967,12 +967,15 @@ static int do_writepage(struct page *page, int len) * the page locked, and it locks @ui_mutex. However, write-back does take inode * @i_mutex, which means other VFS operations may be run on this inode at the * same time. And the problematic one is truncation to smaller size, from where - * we have to call 'vmtruncate()', which first changes @inode->i_size, then + * we have to call 'simple_setsize()', which first changes @inode->i_size, then * drops the truncated pages. And while dropping the pages, it takes the page - * lock. This means that 'do_truncation()' cannot call 'vmtruncate()' with + * lock. This means that 'do_truncation()' cannot call 'simple_setsize()' with * @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This * means that @inode->i_size is changed while @ui_mutex is unlocked. * + * XXX: with the new truncate the above is not true anymore, the simple_setsize + * calls can be replaced with the individual components. + * * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond * inode size. How do we do this if @inode->i_size may became smaller while we * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the @@ -1125,7 +1128,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, budgeted = 0; } - err = vmtruncate(inode, new_size); + err = simple_setsize(inode, new_size); if (err) goto out_budg; @@ -1214,7 +1217,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode, if (attr->ia_valid & ATTR_SIZE) { dbg_gen("size %lld -> %lld", inode->i_size, new_size); - err = vmtruncate(inode, new_size); + err = simple_setsize(inode, new_size); if (err) goto out; } @@ -1223,7 +1226,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode, if (attr->ia_valid & ATTR_SIZE) { /* Truncation changes inode [mc]time */ inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); - /* 'vmtruncate()' changed @i_size, update @ui_size */ + /* 'simple_setsize()' changed @i_size, update @ui_size */ ui->ui_size = inode->i_size; } @@ -1304,9 +1307,9 @@ static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd) return NULL; } -int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync) +int ubifs_fsync(struct file *file, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file->f_mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; int err; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index bd2542dad014..2eef553d50c8 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -379,7 +379,7 @@ struct ubifs_gced_idx_leb { * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot * make sure @inode->i_size is always changed under @ui_mutex, because it - * cannot call 'vmtruncate()' with @ui_mutex locked, because it would deadlock + * cannot call 'simple_setsize()' with @ui_mutex locked, because it would deadlock * with 'ubifs_writepage()' (see file.c). All the other inode fields are * changed under @ui_mutex, so they do not need "shadow" fields. Note, one * could consider to rework locking and base it on "shadow" fields. 
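
The ubifs comments above now refer to simple_setsize() instead of vmtruncate(); the same replacement appears in ramfs, smbfs and ufs elsewhere in this series. For a filesystem without a private truncate path, the size-change branch of ->setattr ends up looking roughly like the sketch below (examplefs_setattr() is hypothetical; real code also updates timestamps and any fs-specific accounting).

	#include <linux/fs.h>

	/* hypothetical ->setattr using the 2.6.35-era truncate helpers */
	static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int err;

		err = inode_change_ok(inode, attr);
		if (err)
			return err;

		if ((attr->ia_valid & ATTR_SIZE) &&
		    attr->ia_size != i_size_read(inode)) {
			/* changes i_size and drops the truncated pagecache pages */
			err = simple_setsize(inode, attr->ia_size);
			if (err)
				return err;
		}

		/* copy the remaining attributes into the in-core inode */
		generic_setattr(inode, attr);
		mark_inode_dirty(inode);
		return 0;
	}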
@@ -1678,7 +1678,7 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c); int ubifs_calc_dark(const struct ubifs_info *c, int spc); /* file.c */ -int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync); +int ubifs_fsync(struct file *file, int datasync); int ubifs_setattr(struct dentry *dentry, struct iattr *attr); /* dir.c */ diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 9a9378b4eb5a..b608efaa4cee 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -21,7 +21,6 @@ #include "udfdecl.h" -#include #include #include @@ -159,8 +158,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb, udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]); } else { - if (inode) - dquot_free_block(inode, 1); udf_add_free_space(sb, sbi->s_partition, 1); } } @@ -210,15 +207,8 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb, bit = block % (sb->s_blocksize << 3); while (bit < (sb->s_blocksize << 3) && block_count > 0) { - if (!udf_test_bit(bit, bh->b_data)) + if (!udf_clear_bit(bit, bh->b_data)) goto out; - else if (dquot_prealloc_block(inode, 1)) - goto out; - else if (!udf_clear_bit(bit, bh->b_data)) { - udf_debug("bit already cleared for block %d\n", bit); - dquot_free_block(inode, 1); - goto out; - } block_count--; alloc_count++; bit++; @@ -338,20 +328,6 @@ search_back: } got_block: - - /* - * Check quota for allocation of this block. - */ - if (inode) { - int ret = dquot_alloc_block(inode, 1); - - if (ret) { - mutex_unlock(&sbi->s_alloc_mutex); - *err = ret; - return 0; - } - } - newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - (sizeof(struct spaceBitmapDesc) << 3); @@ -401,10 +377,6 @@ static void udf_table_free_blocks(struct super_block *sb, } iinfo = UDF_I(table); - /* We do this up front - There are some error conditions that - could occure, but.. oh well */ - if (inode) - dquot_free_block(inode, count); udf_add_free_space(sb, sbi->s_partition, count); start = bloc->logicalBlockNum + offset; @@ -649,10 +621,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb, epos.offset -= adsize; alloc_count = (elen >> sb->s_blocksize_bits); - if (inode && dquot_prealloc_block(inode, - alloc_count > block_count ? 
block_count : alloc_count)) - alloc_count = 0; - else if (alloc_count > block_count) { + if (alloc_count > block_count) { alloc_count = block_count; eloc.logicalBlockNum += alloc_count; elen -= (alloc_count << sb->s_blocksize_bits); @@ -752,14 +721,6 @@ static int udf_table_new_block(struct super_block *sb, newblock = goal_eloc.logicalBlockNum; goal_eloc.logicalBlockNum++; goal_elen -= sb->s_blocksize; - if (inode) { - *err = dquot_alloc_block(inode, 1); - if (*err) { - brelse(goal_epos.bh); - mutex_unlock(&sbi->s_alloc_mutex); - return 0; - } - } if (goal_elen) udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1); diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 3a84455c2a77..51552bf50225 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -207,8 +207,9 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) /* readdir and lookup functions */ const struct file_operations udf_dir_operations = { + .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = udf_readdir, .unlocked_ioctl = udf_ioctl, - .fsync = simple_fsync, + .fsync = generic_file_fsync, }; diff --git a/fs/udf/file.c b/fs/udf/file.c index baae3a723946..94e06d6bddbd 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -219,39 +218,16 @@ const struct file_operations udf_file_operations = { .read = do_sync_read, .aio_read = generic_file_aio_read, .unlocked_ioctl = udf_ioctl, - .open = dquot_file_open, + .open = generic_file_open, .mmap = generic_file_mmap, .write = do_sync_write, .aio_write = udf_file_aio_write, .release = udf_release_file, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, .llseek = generic_file_llseek, }; -int udf_setattr(struct dentry *dentry, struct iattr *iattr) -{ - struct inode *inode = dentry->d_inode; - int error; - - error = inode_change_ok(inode, iattr); - if (error) - return error; - - if (is_quota_modification(inode, iattr)) - dquot_initialize(inode); - - if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || - (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { - error = dquot_transfer(inode, iattr); - if (error) - return error; - } - - return inode_setattr(inode, iattr); -} - const struct inode_operations udf_file_inode_operations = { .truncate = udf_truncate, - .setattr = udf_setattr, }; diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 2b5586c7f02a..18cd7111185d 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -20,7 +20,6 @@ #include "udfdecl.h" #include -#include #include #include @@ -32,13 +31,6 @@ void udf_free_inode(struct inode *inode) struct super_block *sb = inode->i_sb; struct udf_sb_info *sbi = UDF_SB(sb); - /* - * Note: we must free any quota before locking the superblock, - * as writing the quota to disk may need the lock as well. 
- */ - dquot_free_inode(inode); - dquot_drop(inode); - clear_inode(inode); mutex_lock(&sbi->s_alloc_mutex); @@ -61,7 +53,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) struct super_block *sb = dir->i_sb; struct udf_sb_info *sbi = UDF_SB(sb); struct inode *inode; - int block, ret; + int block; uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; struct udf_inode_info *iinfo; struct udf_inode_info *dinfo = UDF_I(dir); @@ -146,17 +138,6 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) insert_inode_hash(inode); mark_inode_dirty(inode); - dquot_initialize(inode); - ret = dquot_alloc_inode(inode); - if (ret) { - dquot_drop(inode); - inode->i_flags |= S_NOQUOTA; - inode->i_nlink = 0; - iput(inode); - *err = ret; - return NULL; - } - *err = 0; return inode; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 8a3fbd177cab..124852bcf6fe 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -71,9 +70,6 @@ static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int); void udf_delete_inode(struct inode *inode) { - if (!is_bad_inode(inode)) - dquot_initialize(inode); - truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) @@ -113,7 +109,6 @@ void udf_clear_inode(struct inode *inode) (unsigned long long)iinfo->i_lenExtents); } - dquot_drop(inode); kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; } diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 585f733615dc..bf5fc674193c 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -563,8 +562,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, int err; struct udf_inode_info *iinfo; - dquot_initialize(dir); - lock_kernel(); inode = udf_new_inode(dir, mode, &err); if (!inode) { @@ -617,8 +614,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, if (!old_valid_dev(rdev)) return -EINVAL; - dquot_initialize(dir); - lock_kernel(); err = -EIO; inode = udf_new_inode(dir, mode, &err); @@ -664,8 +659,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct udf_inode_info *dinfo = UDF_I(dir); struct udf_inode_info *iinfo; - dquot_initialize(dir); - lock_kernel(); err = -EMLINK; if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) @@ -800,8 +793,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) struct fileIdentDesc *fi, cfi; struct kernel_lb_addr tloc; - dquot_initialize(dir); - retval = -ENOENT; lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); @@ -848,8 +839,6 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry) struct fileIdentDesc cfi; struct kernel_lb_addr tloc; - dquot_initialize(dir); - retval = -ENOENT; lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); @@ -904,8 +893,6 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, struct buffer_head *bh; struct udf_inode_info *iinfo; - dquot_initialize(dir); - lock_kernel(); inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); if (!inode) @@ -1075,8 +1062,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, int err; struct buffer_head *bh; - dquot_initialize(dir); - lock_kernel(); if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { unlock_kernel(); @@ -1139,9 +1124,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, struct kernel_lb_addr tloc; struct udf_inode_info 
*old_iinfo = UDF_I(old_inode); - dquot_initialize(old_dir); - dquot_initialize(new_dir); - lock_kernel(); ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); if (ofi) { @@ -1387,7 +1369,6 @@ const struct export_operations udf_export_ops = { const struct inode_operations udf_dir_inode_operations = { .lookup = udf_lookup, .create = udf_create, - .setattr = udf_setattr, .link = udf_link, .unlink = udf_unlink, .symlink = udf_symlink, @@ -1400,5 +1381,4 @@ const struct inode_operations udf_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, - .setattr = udf_setattr, }; diff --git a/fs/udf/super.c b/fs/udf/super.c index 1e4543cbcd27..612d1e2e285a 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -557,6 +557,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) { struct udf_options uopt; struct udf_sb_info *sbi = UDF_SB(sb); + int error = 0; uopt.flags = sbi->s_flags; uopt.uid = sbi->s_uid; @@ -582,17 +583,17 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) *flags |= MS_RDONLY; } - if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { - unlock_kernel(); - return 0; - } + if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) + goto out_unlock; + if (*flags & MS_RDONLY) udf_close_lvid(sb); else udf_open_lvid(sb); +out_unlock: unlock_kernel(); - return 0; + return error; } /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */ @@ -1939,7 +1940,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) /* Fill in the rest of the superblock */ sb->s_op = &udf_sb_ops; sb->s_export_op = &udf_export_ops; - sb->dq_op = NULL; + sb->s_dirt = 0; sb->s_magic = UDF_SUPER_MAGIC; sb->s_time_gran = 1000; diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 9079ff7d6255..2bac0354891f 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -131,7 +131,6 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, /* file.c */ extern long udf_ioctl(struct file *, unsigned int, unsigned long); -extern int udf_setattr(struct dentry *dentry, struct iattr *iattr); /* inode.c */ extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); extern int udf_sync_inode(struct inode *); diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 5cfa4d85ccf2..048484fb10d2 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -85,9 +84,6 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) "bit already cleared for fragment %u", i); } - dquot_free_block(inode, count); - - fs32_add(sb, &ucg->cg_cs.cs_nffree, count); uspi->cs_total.cs_nffree += count; fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); @@ -195,7 +191,6 @@ do_more: ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, 1); - dquot_free_block(inode, uspi->s_fpb); fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); uspi->cs_total.cs_nbfree++; @@ -511,7 +506,6 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, struct ufs_cg_private_info * ucpi; struct ufs_cylinder_group * ucg; unsigned cgno, fragno, fragoff, count, fragsize, i; - int ret; UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n", (unsigned long long)fragment, oldcount, newcount); @@ -557,11 +551,6 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment, fs32_add(sb, &ucg->cg_frsum[fragsize - 
count], 1); for (i = oldcount; i < newcount; i++) ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); - ret = dquot_alloc_block(inode, count); - if (ret) { - *err = ret; - return 0; - } fs32_sub(sb, &ucg->cg_cs.cs_nffree, count); fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); @@ -598,7 +587,6 @@ static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno, struct ufs_cylinder_group * ucg; unsigned oldcg, i, j, k, allocsize; u64 result; - int ret; UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n", inode->i_ino, cgno, (unsigned long long)goal, count); @@ -667,7 +655,6 @@ cg_found: for (i = count; i < uspi->s_fpb; i++) ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); i = uspi->s_fpb - count; - dquot_free_block(inode, i); fs32_add(sb, &ucg->cg_cs.cs_nffree, i); uspi->cs_total.cs_nffree += i; @@ -679,11 +666,6 @@ cg_found: result = ufs_bitmap_search (sb, ucpi, goal, allocsize); if (result == INVBLOCK) return 0; - ret = dquot_alloc_block(inode, count); - if (ret) { - *err = ret; - return 0; - } for (i = 0; i < count; i++) ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); @@ -718,7 +700,6 @@ static u64 ufs_alloccg_block(struct inode *inode, struct ufs_super_block_first * usb1; struct ufs_cylinder_group * ucg; u64 result, blkno; - int ret; UFSD("ENTER, goal %llu\n", (unsigned long long)goal); @@ -752,11 +733,6 @@ gotit: ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) ufs_clusteracct (sb, ucpi, blkno, -1); - ret = dquot_alloc_block(inode, uspi->s_fpb); - if (ret) { - *err = ret; - return INVBLOCK; - } fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1); uspi->cs_total.cs_nbfree--; diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 317a0d444f6b..ec784756dc65 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -666,6 +666,6 @@ not_empty: const struct file_operations ufs_dir_operations = { .read = generic_read_dir, .readdir = ufs_readdir, - .fsync = simple_fsync, + .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/ufs/file.c b/fs/ufs/file.c index a8962cecde5b..33afa20d4509 100644 --- a/fs/ufs/file.c +++ b/fs/ufs/file.c @@ -24,7 +24,6 @@ */ #include -#include #include "ufs_fs.h" #include "ufs.h" @@ -41,7 +40,7 @@ const struct file_operations ufs_file_operations = { .write = do_sync_write, .aio_write = generic_file_aio_write, .mmap = generic_file_mmap, - .open = dquot_file_open, - .fsync = simple_fsync, + .open = generic_file_open, + .fsync = generic_file_fsync, .splice_read = generic_file_splice_read, }; diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 3a959d55084d..594480e537d2 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -95,9 +94,6 @@ void ufs_free_inode (struct inode * inode) is_directory = S_ISDIR(inode->i_mode); - dquot_free_inode(inode); - dquot_drop(inode); - clear_inode (inode); if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_iusedoff, bit)) @@ -347,21 +343,12 @@ cg_found: unlock_super (sb); - dquot_initialize(inode); - err = dquot_alloc_inode(inode); - if (err) { - dquot_drop(inode); - goto fail_without_unlock; - } - UFSD("allocating inode %lu\n", inode->i_ino); UFSD("EXIT\n"); return inode; fail_remove_inode: unlock_super(sb); -fail_without_unlock: - inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); UFSD("EXIT (FAILED): err %d\n", err); diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index cffa756f1047..73fe773aa034 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -37,7 +37,6 @@ 
#include #include #include -#include #include "ufs_fs.h" #include "ufs.h" @@ -910,9 +909,6 @@ void ufs_delete_inode (struct inode * inode) { loff_t old_i_size; - if (!is_bad_inode(inode)) - dquot_initialize(inode); - truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) goto no_delete; diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index eabc02eb1294..b056f02b1fb3 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -30,7 +30,6 @@ #include #include #include -#include #include "ufs_fs.h" #include "ufs.h" @@ -86,8 +85,6 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, int mode, UFSD("BEGIN\n"); - dquot_initialize(dir); - inode = ufs_new_inode(dir, mode); err = PTR_ERR(inode); @@ -112,8 +109,6 @@ static int ufs_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t if (!old_valid_dev(rdev)) return -EINVAL; - dquot_initialize(dir); - inode = ufs_new_inode(dir, mode); err = PTR_ERR(inode); if (!IS_ERR(inode)) { @@ -138,8 +133,6 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, if (l > sb->s_blocksize) goto out_notlocked; - dquot_initialize(dir); - lock_kernel(); inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); err = PTR_ERR(inode); @@ -185,8 +178,6 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, return -EMLINK; } - dquot_initialize(dir); - inode->i_ctime = CURRENT_TIME_SEC; inode_inc_link_count(inode); atomic_inc(&inode->i_count); @@ -204,8 +195,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, int mode) if (dir->i_nlink >= UFS_LINK_MAX) goto out; - dquot_initialize(dir); - lock_kernel(); inode_inc_link_count(dir); @@ -250,8 +239,6 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry) struct page *page; int err = -ENOENT; - dquot_initialize(dir); - de = ufs_find_entry(dir, &dentry->d_name, &page); if (!de) goto out; @@ -296,9 +283,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, struct ufs_dir_entry *old_de; int err = -ENOENT; - dquot_initialize(old_dir); - dquot_initialize(new_dir); - old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_de) goto out; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 14743d935a93..3ec5a9eb6efb 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -77,7 +77,6 @@ #include #include -#include #include #include #include @@ -918,6 +917,7 @@ again: sbi->s_bytesex = BYTESEX_LE; switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { case UFS_MAGIC: + case UFS_MAGIC_BW: case UFS2_MAGIC: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: @@ -927,6 +927,7 @@ again: sbi->s_bytesex = BYTESEX_BE; switch ((uspi->fs_magic = fs32_to_cpu(sb, usb3->fs_magic))) { case UFS_MAGIC: + case UFS_MAGIC_BW: case UFS2_MAGIC: case UFS_MAGIC_LFN: case UFS_MAGIC_FEA: @@ -1045,7 +1046,7 @@ magic_found: */ sb->s_op = &ufs_super_ops; sb->s_export_op = &ufs_export_ops; - sb->dq_op = NULL; /***/ + sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic); uspi->s_sblkno = fs32_to_cpu(sb, usb1->fs_sblkno); @@ -1435,126 +1436,19 @@ static void destroy_inodecache(void) kmem_cache_destroy(ufs_inode_cachep); } -static void ufs_clear_inode(struct inode *inode) -{ - dquot_drop(inode); -} - -#ifdef CONFIG_QUOTA -static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t); -static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t); -#endif - static const struct super_operations ufs_super_ops = { .alloc_inode = ufs_alloc_inode, .destroy_inode = ufs_destroy_inode, .write_inode = ufs_write_inode, .delete_inode = 
ufs_delete_inode, - .clear_inode = ufs_clear_inode, .put_super = ufs_put_super, .write_super = ufs_write_super, .sync_fs = ufs_sync_fs, .statfs = ufs_statfs, .remount_fs = ufs_remount, .show_options = ufs_show_options, -#ifdef CONFIG_QUOTA - .quota_read = ufs_quota_read, - .quota_write = ufs_quota_write, -#endif }; -#ifdef CONFIG_QUOTA - -/* Read data from quotafile - avoid pagecache and such because we cannot afford - * acquiring the locks... As quota files are never truncated and quota code - * itself serializes the operations (and noone else should touch the files) - * we don't have to be afraid of races */ -static ssize_t ufs_quota_read(struct super_block *sb, int type, char *data, - size_t len, loff_t off) -{ - struct inode *inode = sb_dqopt(sb)->files[type]; - sector_t blk = off >> sb->s_blocksize_bits; - int err = 0; - int offset = off & (sb->s_blocksize - 1); - int tocopy; - size_t toread; - struct buffer_head *bh; - loff_t i_size = i_size_read(inode); - - if (off > i_size) - return 0; - if (off+len > i_size) - len = i_size-off; - toread = len; - while (toread > 0) { - tocopy = sb->s_blocksize - offset < toread ? - sb->s_blocksize - offset : toread; - - bh = ufs_bread(inode, blk, 0, &err); - if (err) - return err; - if (!bh) /* A hole? */ - memset(data, 0, tocopy); - else { - memcpy(data, bh->b_data+offset, tocopy); - brelse(bh); - } - offset = 0; - toread -= tocopy; - data += tocopy; - blk++; - } - return len; -} - -/* Write to quotafile */ -static ssize_t ufs_quota_write(struct super_block *sb, int type, - const char *data, size_t len, loff_t off) -{ - struct inode *inode = sb_dqopt(sb)->files[type]; - sector_t blk = off >> sb->s_blocksize_bits; - int err = 0; - int offset = off & (sb->s_blocksize - 1); - int tocopy; - size_t towrite = len; - struct buffer_head *bh; - - mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); - while (towrite > 0) { - tocopy = sb->s_blocksize - offset < towrite ? 
- sb->s_blocksize - offset : towrite; - - bh = ufs_bread(inode, blk, 1, &err); - if (!bh) - goto out; - lock_buffer(bh); - memcpy(bh->b_data+offset, data, tocopy); - flush_dcache_page(bh->b_page); - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - unlock_buffer(bh); - brelse(bh); - offset = 0; - towrite -= tocopy; - data += tocopy; - blk++; - } -out: - if (len == towrite) { - mutex_unlock(&inode->i_mutex); - return err; - } - if (inode->i_size < off+len-towrite) - i_size_write(inode, off+len-towrite); - inode->i_version++; - inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - mutex_unlock(&inode->i_mutex); - return len - towrite; -} - -#endif - static int ufs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index f294c44577dc..589e01a465ba 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c @@ -44,7 +44,6 @@ #include #include #include -#include #include "ufs_fs.h" #include "ufs.h" @@ -501,12 +500,10 @@ out: return err; } - /* - * We don't define our `inode->i_op->truncate', and call it here, - * because of: - * - there is no way to know old size - * - there is no way inform user about error, if it happens in `truncate' + * TODO: + * - truncate case should use proper ordering instead of using + * simple_setsize */ int ufs_setattr(struct dentry *dentry, struct iattr *attr) { @@ -518,19 +515,10 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr) if (error) return error; - if (is_quota_modification(inode, attr)) - dquot_initialize(inode); - - if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || - (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { - error = dquot_transfer(inode, attr); - if (error) - return error; - } if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { loff_t old_i_size = inode->i_size; - error = vmtruncate(inode, attr->ia_size); + error = simple_setsize(inode, attr->ia_size); if (error) return error; error = ufs_truncate(inode, old_i_size); diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h index 6943ec677c0b..8aba544f9fad 100644 --- a/fs/ufs/ufs_fs.h +++ b/fs/ufs/ufs_fs.h @@ -48,6 +48,7 @@ typedef __u16 __bitwise __fs16; #define UFS_SECTOR_SIZE 512 #define UFS_SECTOR_BITS 9 #define UFS_MAGIC 0x00011954 +#define UFS_MAGIC_BW 0x0f242697 #define UFS2_MAGIC 0x19540119 #define UFS_CIGAM 0x54190100 /* byteswapped MAGIC */ diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index d8fb1b5d6cb5..257a56b127cf 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -100,10 +100,10 @@ xfs_iozero( STATIC int xfs_file_fsync( struct file *file, - struct dentry *dentry, int datasync) { - struct xfs_inode *ip = XFS_I(dentry->d_inode); + struct inode *inode = file->f_mapping->host; + struct xfs_inode *ip = XFS_I(inode); struct xfs_trans *tp; int error = 0; int log_flushed = 0; @@ -140,8 +140,8 @@ xfs_file_fsync( * might gets cleared when the inode gets written out via the AIL * or xfs_iflush_cluster. 
*/ - if (((dentry->d_inode->i_state & I_DIRTY_DATASYNC) || - ((dentry->d_inode->i_state & I_DIRTY_SYNC) && !datasync)) && + if (((inode->i_state & I_DIRTY_DATASYNC) || + ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && ip->i_update_core) { /* * Kick off a transaction to log the inode core to get the @@ -868,7 +868,7 @@ write_retry: mutex_lock(&inode->i_mutex); xfs_ilock(ip, iolock); - error2 = -xfs_file_fsync(file, file->f_path.dentry, + error2 = -xfs_file_fsync(file, (file->f_flags & __O_SYNC) ? 0 : 1); if (!error) error = error2; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 7bf83ddf82e0..baacd98e7cc6 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -373,7 +373,7 @@ struct acpi_pci_root { struct acpi_pci_id id; struct pci_bus *bus; u16 segment; - u8 bus_nr; + struct resource secondary; /* downstream bus range */ u32 osc_support_set; /* _OSC state of support bits */ u32 osc_control_set; /* _OSC state of control bits */ diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h index 4f7b44866b76..23d78b4d088b 100644 --- a/include/acpi/acpi_drivers.h +++ b/include/acpi/acpi_drivers.h @@ -104,8 +104,7 @@ int acpi_pci_bind_root(struct acpi_device *device); /* Arch-defined function to add a bus to the system */ -struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain, - int bus); +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root); void pci_acpi_crs_quirks(void); /* -------------------------------------------------------------------------- diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h deleted file mode 100644 index 63194d03cb2d..000000000000 --- a/include/acpi/acpi_hest.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ACPI_HEST_H -#define __ACPI_HEST_H - -#include - -#ifdef CONFIG_ACPI -extern int acpi_hest_firmware_first_pci(struct pci_dev *pci); -#else -static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; } -#endif - -#endif diff --git a/include/acpi/apei.h b/include/acpi/apei.h new file mode 100644 index 000000000000..b3365025ff8d --- /dev/null +++ b/include/acpi/apei.h @@ -0,0 +1,34 @@ +/* + * apei.h - ACPI Platform Error Interface + */ + +#ifndef ACPI_APEI_H +#define ACPI_APEI_H + +#include +#include +#include + +#define APEI_ERST_INVALID_RECORD_ID 0xffffffffffffffffULL + +#define APEI_ERST_CLEAR_RECORD _IOW('E', 1, u64) +#define APEI_ERST_GET_RECORD_COUNT _IOR('E', 2, u32) + +#ifdef __KERNEL__ + +extern int hest_disable; +extern int erst_disable; + +typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data); +int apei_hest_parse(apei_hest_func_t func, void *data); + +int erst_write(const struct cper_record_header *record); +ssize_t erst_get_record_count(void); +int erst_get_next_record_id(u64 *record_id); +ssize_t erst_read(u64 record_id, struct cper_record_header *record, + size_t buflen); +ssize_t erst_read_next(struct cper_record_header *record, size_t buflen); +int erst_clear(u64 record_id); + +#endif +#endif diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h new file mode 100644 index 000000000000..8b9fb4b0b9ce --- /dev/null +++ b/include/acpi/atomicio.h @@ -0,0 +1,10 @@ +#ifndef ACPI_ATOMIC_IO_H +#define ACPI_ATOMIC_IO_H + +int acpi_pre_map_gar(struct acpi_generic_address *reg); +int acpi_post_unmap_gar(struct acpi_generic_address *reg); + +int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg); +int acpi_atomic_write(u64 val, struct acpi_generic_address *reg); + +#endif diff --git a/include/acpi/hed.h 
b/include/acpi/hed.h new file mode 100644 index 000000000000..46e1249b70cc --- /dev/null +++ b/include/acpi/hed.h @@ -0,0 +1,18 @@ +/* + * hed.h - ACPI Hardware Error Device + * + * Copyright (C) 2009, Intel Corp. + * Author: Huang Ying + * + * This file is released under the GPLv2. + */ + +#ifndef ACPI_HED_H +#define ACPI_HED_H + +#include + +int register_acpi_hed_notifier(struct notifier_block *nb); +void unregister_acpi_hed_notifier(struct notifier_block *nb); + +#endif diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 86825ddbe14e..da565a48240e 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -52,17 +52,6 @@ struct acpi_power_register { u64 address; } __attribute__ ((packed)); -struct acpi_processor_cx_policy { - u32 count; - struct acpi_processor_cx *state; - struct { - u32 time; - u32 ticks; - u32 count; - u32 bm; - } threshold; -}; - struct acpi_processor_cx { u8 valid; u8 type; @@ -74,8 +63,6 @@ struct acpi_processor_cx { u32 power; u32 usage; u64 time; - struct acpi_processor_cx_policy promotion; - struct acpi_processor_cx_policy demotion; char desc[ACPI_CX_DESC_LEN]; }; diff --git a/include/acpi/video.h b/include/acpi/video.h index cf7be3dd157b..551793c9b6e8 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -1,12 +1,28 @@ #ifndef __ACPI_VIDEO_H #define __ACPI_VIDEO_H +#define ACPI_VIDEO_DISPLAY_CRT 1 +#define ACPI_VIDEO_DISPLAY_TV 2 +#define ACPI_VIDEO_DISPLAY_DVI 3 +#define ACPI_VIDEO_DISPLAY_LCD 4 + +#define ACPI_VIDEO_DISPLAY_LEGACY_MONITOR 0x0100 +#define ACPI_VIDEO_DISPLAY_LEGACY_PANEL 0x0110 +#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200 + #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE) extern int acpi_video_register(void); extern void acpi_video_unregister(void); +extern int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid); #else static inline int acpi_video_register(void) { return 0; } static inline void acpi_video_unregister(void) { return; } +static inline int acpi_video_get_edid(struct acpi_device *device, int type, + int device_id, void **edid) +{ + return -ENODEV; +} #endif #endif diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 69206957b72c..0c80bb38773f 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -123,15 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_range_for_cpu) { - ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); - debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); - - } else - dma_sync_single_for_cpu(dev, addr + offset, size, dir); + dma_sync_single_for_cpu(dev, addr + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -140,15 +132,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, size_t size, enum dma_data_direction dir) { - struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (ops->sync_single_range_for_device) { - ops->sync_single_range_for_device(dev, addr, offset, size, dir); - debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); - - } else - dma_sync_single_for_device(dev, addr + offset, size, dir); + dma_sync_single_for_device(dev, addr + offset, size, dir); } static inline void diff --git 
a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index 979c6a57f2f1..4f3d75e1ad39 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -60,7 +60,9 @@ struct module; * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. Any entry in the array * may be NULL if there is no alias for the GPIO, however the - * array must be @ngpio entries long. + * array must be @ngpio entries long. A name can include a single printk + * format specifier for an unsigned int. It is substituted by the actual + * number of the gpio. * * A gpio_chip can help platforms abstract various sources of GPIOs so * they can all be accessed through a common programing interface. @@ -88,6 +90,9 @@ struct gpio_chip { unsigned offset); int (*direction_output)(struct gpio_chip *chip, unsigned offset, int value); + int (*set_debounce)(struct gpio_chip *chip, + unsigned offset, unsigned debounce); + void (*set)(struct gpio_chip *chip, unsigned offset, int value); @@ -98,7 +103,7 @@ struct gpio_chip { struct gpio_chip *chip); int base; u16 ngpio; - char **names; + const char *const *names; unsigned can_sleep:1; unsigned exported:1; }; @@ -121,6 +126,8 @@ extern void gpio_free(unsigned gpio); extern int gpio_direction_input(unsigned gpio); extern int gpio_direction_output(unsigned gpio, int value); +extern int gpio_set_debounce(unsigned gpio, unsigned debounce); + extern int gpio_get_value_cansleep(unsigned gpio); extern void gpio_set_value_cansleep(unsigned gpio, int value); diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 04f91c2d3f7b..b5043a9890d8 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void); #ifndef PER_CPU_BASE_SECTION #ifdef CONFIG_SMP -#define PER_CPU_BASE_SECTION ".data.percpu" +#define PER_CPU_BASE_SECTION ".data..percpu" #else #define PER_CPU_BASE_SECTION ".data" #endif @@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void); #define PER_CPU_SHARED_ALIGNED_SECTION "" #define PER_CPU_ALIGNED_SECTION "" #else -#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" -#define PER_CPU_ALIGNED_SECTION ".shared_aligned" +#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" #endif -#define PER_CPU_FIRST_SECTION ".first" +#define PER_CPU_FIRST_SECTION "..first" #else #define PER_CPU_SHARED_ALIGNED_SECTION "" -#define PER_CPU_ALIGNED_SECTION ".shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" #define PER_CPU_FIRST_SECTION "" #endif diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h index 8b9454496a7c..5de07355fad4 100644 --- a/include/asm-generic/scatterlist.h +++ b/include/asm-generic/scatterlist.h @@ -11,7 +11,9 @@ struct scatterlist { unsigned int offset; unsigned int length; dma_addr_t dma_address; +#ifdef CONFIG_NEED_SG_DMA_LENGTH unsigned int dma_length; +#endif }; /* @@ -22,22 +24,11 @@ struct scatterlist { * is 0. */ #define sg_dma_address(sg) ((sg)->dma_address) -#ifndef sg_dma_len -/* - * Normally, you have an iommu on 64 bit machines, but not on 32 bit - * machines. Architectures that are differnt should override this. 
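The asm-generic/gpio.h hunk above adds a set_debounce() hook to struct gpio_chip and a gpio_set_debounce() wrapper. A minimal usage sketch follows; the GPIO number and debounce period are made up, and the period is assumed to be in microseconds:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>

#define EXAMPLE_BUTTON_GPIO 42          /* hypothetical GPIO line */

static int example_button_setup(void)
{
        int err;

        err = gpio_request(EXAMPLE_BUTTON_GPIO, "example-button");
        if (err)
                return err;

        err = gpio_direction_input(EXAMPLE_BUTTON_GPIO);
        if (err) {
                gpio_free(EXAMPLE_BUTTON_GPIO);
                return err;
        }

        /*
         * Best effort: not every gpio_chip implements the new
         * set_debounce() hook, so failure here is non-fatal and the
         * driver could fall back to debouncing in software.
         */
        if (gpio_set_debounce(EXAMPLE_BUTTON_GPIO, 5000))
                pr_info("example-button: no hardware debounce\n");

        return 0;
}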
- */ -#if __BITS_PER_LONG == 64 + +#ifdef CONFIG_NEED_SG_DMA_LENGTH #define sg_dma_len(sg) ((sg)->dma_length) #else #define sg_dma_len(sg) ((sg)->length) -#endif /* 64 bit */ -#endif /* sg_dma_len */ - -#ifndef ISA_DMA_THRESHOLD -#define ISA_DMA_THRESHOLD (~0UL) #endif -#define ARCH_HAS_SG_CHAIN - #endif /* __ASM_GENERIC_SCATTERLIST_H */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 510df36dd5d4..fd60700503c8 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -34,6 +34,9 @@ #ifndef cpu_to_node #define cpu_to_node(cpu) ((void)(cpu),0) #endif +#ifndef cpu_to_mem +#define cpu_to_mem(cpu) ((void)(cpu),0) +#endif #ifndef parent_node #define parent_node(node) ((void)(node),0) #endif diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 67e652068e0e..48c5299cbf26 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -175,25 +175,25 @@ #define NOSAVE_DATA \ . = ALIGN(PAGE_SIZE); \ VMLINUX_SYMBOL(__nosave_begin) = .; \ - *(.data.nosave) \ + *(.data..nosave) \ . = ALIGN(PAGE_SIZE); \ VMLINUX_SYMBOL(__nosave_end) = .; #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data.page_aligned) + *(.data..page_aligned) #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ - *(.data.read_mostly) + *(.data..read_mostly) #define CACHELINE_ALIGNED_DATA(align) \ . = ALIGN(align); \ - *(.data.cacheline_aligned) + *(.data..cacheline_aligned) #define INIT_TASK_DATA(align) \ . = ALIGN(align); \ - *(.data.init_task) + *(.data..init_task) /* * Read only Data @@ -247,10 +247,10 @@ } \ \ /* RapidIO route ops */ \ - .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ - VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ - *(.rio_route_ops) \ - VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ + .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \ + *(.rio_switch_ops) \ + VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \ } \ \ TRACEDATA \ @@ -435,7 +435,7 @@ */ #define INIT_TASK_DATA_SECTION(align) \ . = ALIGN(align); \ - .data.init_task : { \ + .data..init_task : { \ INIT_TASK_DATA(align) \ } @@ -499,7 +499,7 @@ #define BSS(bss_align) \ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ - *(.bss.page_aligned) \ + *(.bss..page_aligned) \ *(.dynbss) \ *(.bss) \ *(COMMON) \ @@ -666,16 +666,16 @@ */ #define PERCPU_VADDR(vaddr, phdr) \ VMLINUX_SYMBOL(__per_cpu_load) = .; \ - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ + .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ - *(.data.percpu.first) \ - *(.data.percpu.page_aligned) \ - *(.data.percpu) \ - *(.data.percpu.shared_aligned) \ + *(.data..percpu..first) \ + *(.data..percpu..page_aligned) \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ } phdr \ - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu); + . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); /** * PERCPU - define output section for percpu area, simple version @@ -687,18 +687,18 @@ * * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except * that __per_cpu_load is defined as a relative symbol against - * .data.percpu which is required for relocatable x86_32 + * .data..percpu which is required for relocatable x86_32 * configuration. */ #define PERCPU(align) \ . 
= ALIGN(align); \ - .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ + .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ VMLINUX_SYMBOL(__per_cpu_load) = .; \ VMLINUX_SYMBOL(__per_cpu_start) = .; \ - *(.data.percpu.first) \ - *(.data.percpu.page_aligned) \ - *(.data.percpu) \ - *(.data.percpu.shared_aligned) \ + *(.data..percpu..first) \ + *(.data..percpu..page_aligned) \ + *(.data..percpu) \ + *(.data..percpu..shared_aligned) \ VMLINUX_SYMBOL(__per_cpu_end) = .; \ } diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3da73f5f0ae9..224a38c960d4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -248,11 +248,12 @@ int acpi_check_region(resource_size_t start, resource_size_t n, int acpi_check_mem_region(resource_size_t start, resource_size_t n, const char *name); +int acpi_resources_are_enforced(void); + #ifdef CONFIG_PM_SLEEP void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); void __init acpi_s4_no_nvs(void); -void __init acpi_set_sci_en_on_resume(void); #endif /* CONFIG_PM_SLEEP */ struct acpi_osc_context { diff --git a/include/linux/aio.h b/include/linux/aio.h index 811dbb369379..7a8db4155281 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb); extern int aio_complete(struct kiocb *iocb, long res, long res2); struct mm_struct; extern void exit_aio(struct mm_struct *mm); +extern long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user *__user *iocbpp, bool compat); #else static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } static inline int aio_put_req(struct kiocb *iocb) { return 0; } @@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { } static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } struct mm_struct; static inline void exit_aio(struct mm_struct *mm) { } +static inline long do_io_submit(aio_context_t ctx_id, long nr, + struct iocb __user * __user *iocbpp, + bool compat) { return 0; } #endif /* CONFIG_AIO */ static inline struct kiocb *list_kiocb(struct list_head *h) diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h new file mode 100644 index 000000000000..cbee7de7dd36 --- /dev/null +++ b/include/linux/amba/pl330.h @@ -0,0 +1,45 @@ +/* linux/include/linux/amba/pl330.h + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. + * Jaswinder Singh + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __AMBA_PL330_H_ +#define __AMBA_PL330_H_ + +#include + +struct dma_pl330_peri { + /* + * Peri_Req i/f of the DMAC that is + * peripheral could be reached from. + */ + u8 peri_id; /* {0, 31} */ + enum pl330_reqtype rqtype; + + /* For M->D and D->M Channels */ + int burst_sz; /* in power of 2 */ + dma_addr_t fifo_addr; +}; + +struct dma_pl330_platdata { + /* + * Number of valid peripherals connected to DMAC. + * This may be different from the value read from + * CR0, as the PL330 implementation might have 'holes' + * in the peri list or the peri could also be reached + * from another DMAC which the platform prefers. 
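The comment block above describes how board code tells the PL330 driver which Peri_Req interfaces are usable. A board-file sketch, assuming MEMTODEV is one of the enum pl330_reqtype values provided by the PL330 core header; all numbers are illustrative, not taken from real hardware:

#include <linux/kernel.h>
#include <linux/amba/pl330.h>

static struct dma_pl330_peri example_dma_peri[] = {
        {
                .peri_id   = 0,            /* Peri_Req interface 0 */
                .rqtype    = MEMTODEV,     /* memory-to-device channel */
                .burst_sz  = 2,            /* log2 of burst size, i.e. 4 bytes */
                .fifo_addr = 0x13800020,   /* hypothetical peripheral FIFO */
        },
};

static struct dma_pl330_platdata example_dma_pdata = {
        .nr_valid_peri = ARRAY_SIZE(example_dma_peri),
        .peri          = example_dma_peri,
        .mcbuf_sz      = 512,              /* microcode buffer bytes per channel */
};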
+ */ + u8 nr_valid_peri; + /* Array of valid peripherals */ + struct dma_pl330_peri *peri; + /* Bytes to allocate for MC buffer */ + unsigned mcbuf_sz; +}; + +#endif /* __AMBA_PL330_H_ */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 16ed0284d780..1b9ba193b789 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -203,6 +203,9 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, unsigned long from); +int block_write_begin_newtrunc(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page **, void **, get_block_t*); int block_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); @@ -214,6 +217,9 @@ int generic_write_end(struct file *, struct address_space *, struct page *, void *); void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); +int cont_write_begin_newtrunc(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page **, void **, + get_block_t *, loff_t *); int cont_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t *, loff_t *); @@ -224,7 +230,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, void block_sync_page(struct page *); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int block_truncate_page(struct address_space *, loff_t, get_block_t *); -int file_fsync(struct file *, struct dentry *, int); +int file_fsync(struct file *, int); +int nobh_write_begin_newtrunc(struct file *, struct address_space *, + loff_t, unsigned, unsigned, + struct page **, void **, get_block_t*); int nobh_write_begin(struct file *, struct address_space *, loff_t, unsigned, unsigned, struct page **, void **, get_block_t*); diff --git a/include/linux/cache.h b/include/linux/cache.h index 97e24881c4c6..4c570653ab84 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -31,7 +31,7 @@ #ifndef __cacheline_aligned #define __cacheline_aligned \ __attribute__((__aligned__(SMP_CACHE_BYTES), \ - __section__(".data.cacheline_aligned"))) + __section__(".data..cacheline_aligned"))) #endif /* __cacheline_aligned */ #ifndef __cacheline_aligned_in_smp diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 8f78073d7caa..0c621604baa1 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -397,7 +397,7 @@ struct cftype { * This callback must be implemented, if you want provide * notification functionality. 
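The buffer_head.h hunk above (and the ext3_fs.h, fb.h and fs.h hunks further down) reflect the ->fsync() prototype losing its dentry argument. A sketch of a filesystem fsync after the change, with hypothetical exfs_* naming; the generic_file_fsync() helper declared later in the fs.h hunk provides roughly this behaviour as a library routine:

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int exfs_fsync(struct file *file, int datasync)
{
        /* the inode now comes from the file itself, not a dentry */
        struct inode *inode = file->f_mapping->host;
        int err, ret;

        /* flush dirty private buffers hanging off the mapping */
        err = sync_mapping_buffers(inode->i_mapping);
        if (!(inode->i_state & I_DIRTY))
                return err;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                return err;

        /* then push the inode metadata out */
        ret = write_inode_now(inode, 1);
        return ret ? ret : err;
}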
*/ - int (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, + void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, struct eventfd_ctx *eventfd); }; diff --git a/include/linux/compat.h b/include/linux/compat.h index 717c691ecd8e..168f7daa7bde 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, int flags, int mode); +extern ssize_t compat_rw_copy_check_uvector(int type, + const struct compat_iovec __user *uvector, unsigned long nr_segs, + unsigned long fast_segs, struct iovec *fast_pointer, + struct iovec **ret_pointer); #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h index 4a6b604ef7e4..51e3145196f6 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -83,6 +83,8 @@ extern unsigned long wait_for_completion_timeout(struct completion *x, unsigned long timeout); extern unsigned long wait_for_completion_interruptible_timeout( struct completion *x, unsigned long timeout); +extern unsigned long wait_for_completion_killable_timeout( + struct completion *x, unsigned long timeout); extern bool try_wait_for_completion(struct completion *x); extern bool completion_done(struct completion *x); diff --git a/include/linux/cper.h b/include/linux/cper.h new file mode 100644 index 000000000000..4b38f905b705 --- /dev/null +++ b/include/linux/cper.h @@ -0,0 +1,314 @@ +/* + * UEFI Common Platform Error Record + * + * Copyright (C) 2010, Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef LINUX_CPER_H +#define LINUX_CPER_H + +#include + +/* CPER record signature and the size */ +#define CPER_SIG_RECORD "CPER" +#define CPER_SIG_SIZE 4 +/* Used in signature_end field in struct cper_record_header */ +#define CPER_SIG_END 0xffffffff + +/* + * CPER record header revision, used in revision field in struct + * cper_record_header + */ +#define CPER_RECORD_REV 0x0100 + +/* + * Severity difinition for error_severity in struct cper_record_header + * and section_severity in struct cper_section_descriptor + */ +#define CPER_SER_RECOVERABLE 0x0 +#define CPER_SER_FATAL 0x1 +#define CPER_SER_CORRECTED 0x2 +#define CPER_SER_INFORMATIONAL 0x3 + +/* + * Validation bits difinition for validation_bits in struct + * cper_record_header. If set, corresponding fields in struct + * cper_record_header contain valid information. 
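Because of the validation-bit scheme described above, a consumer should test the relevant bit before trusting an optional header field. A hedged sketch; the function name and message wording are illustrative:

#include <linux/kernel.h>
#include <linux/cper.h>

static void example_show_cper_header(const struct cper_record_header *hdr)
{
        printk(KERN_INFO "CPER record %llu, severity %u\n",
               (unsigned long long)hdr->record_id, hdr->error_severity);

        if (hdr->validation_bits & CPER_VALID_TIMESTAMP)
                printk(KERN_INFO "  timestamp: 0x%llx\n",
                       (unsigned long long)hdr->timestamp);
        if (hdr->validation_bits & CPER_VALID_PLATFORM_ID)
                printk(KERN_INFO "  platform id present\n");
}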
+ * + * corresponds platform_id + */ +#define CPER_VALID_PLATFORM_ID 0x0001 +/* corresponds timestamp */ +#define CPER_VALID_TIMESTAMP 0x0002 +/* corresponds partition_id */ +#define CPER_VALID_PARTITION_ID 0x0004 + +/* + * Notification type used to generate error record, used in + * notification_type in struct cper_record_header + * + * Corrected Machine Check + */ +#define CPER_NOTIFY_CMC \ + UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +/* Corrected Platform Error */ +#define CPER_NOTIFY_CPE \ + UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \ + 0xF2, 0x7E, 0xBE, 0xEE) +/* Machine Check Exception */ +#define CPER_NOTIFY_MCE \ + UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +/* PCI Express Error */ +#define CPER_NOTIFY_PCIE \ + UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \ + 0xAF, 0x67, 0xC1, 0x04) +/* INIT Record (for IPF) */ +#define CPER_NOTIFY_INIT \ + UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \ + 0xD3, 0x9B, 0xC9, 0x8E) +/* Non-Maskable Interrupt */ +#define CPER_NOTIFY_NMI \ + UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \ + 0x85, 0xD6, 0xE9, 0x8A) +/* BOOT Error Record */ +#define CPER_NOTIFY_BOOT \ + UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) +/* DMA Remapping Error */ +#define CPER_NOTIFY_DMAR \ + UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \ + 0x72, 0x2D, 0xEB, 0x41) + +/* + * Flags bits definitions for flags in struct cper_record_header + * If set, the error has been recovered + */ +#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1 +/* If set, the error is for previous boot */ +#define CPER_HW_ERROR_FLAGS_PREVERR 0x2 +/* If set, the error is injected for testing */ +#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4 + +/* + * CPER section header revision, used in revision field in struct + * cper_section_descriptor + */ +#define CPER_SEC_REV 0x0100 + +/* + * Validation bits difinition for validation_bits in struct + * cper_section_descriptor. If set, corresponding fields in struct + * cper_section_descriptor contain valid information. + * + * corresponds fru_id + */ +#define CPER_SEC_VALID_FRU_ID 0x1 +/* corresponds fru_text */ +#define CPER_SEC_VALID_FRU_TEXT 0x2 + +/* + * Flags bits definitions for flags in struct cper_section_descriptor + * + * If set, the section is associated with the error condition + * directly, and should be focused on + */ +#define CPER_SEC_PRIMARY 0x0001 +/* + * If set, the error was not contained within the processor or memory + * hierarchy and the error may have propagated to persistent storage + * or network + */ +#define CPER_SEC_CONTAINMENT_WARNING 0x0002 +/* If set, the component must be re-initialized or re-enabled prior to use */ +#define CPER_SEC_RESET 0x0004 +/* If set, Linux may choose to discontinue use of the resource */ +#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008 +/* + * If set, resource could not be queried for error information due to + * conflicts with other system software or resources. Some fields of + * the section will be invalid + */ +#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010 +/* + * If set, action has been taken to ensure error containment (such as + * poisoning data), but the error has not been fully corrected and the + * data has not been consumed. 
Linux may choose to take further + * corrective action before the data is consumed + */ +#define CPER_SEC_LATENT_ERROR 0x0020 + +/* + * Section type definitions, used in section_type field in struct + * cper_section_descriptor + * + * Processor Generic + */ +#define CPER_SEC_PROC_GENERIC \ + UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ + 0x93, 0xC4, 0xF3, 0xDB) +/* Processor Specific: X86/X86_64 */ +#define CPER_SEC_PROC_IA \ + UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) +/* Processor Specific: IA64 */ +#define CPER_SEC_PROC_IPF \ + UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \ + 0x80, 0xC7, 0x3C, 0x88, 0x81) +/* Platform Memory */ +#define CPER_SEC_PLATFORM_MEM \ + UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ + 0xED, 0x7C, 0x83, 0xB1) +#define CPER_SEC_PCIE \ + UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \ + 0xCB, 0x3C, 0x6F, 0x35) +/* Firmware Error Record Reference */ +#define CPER_SEC_FW_ERR_REC_REF \ + UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \ + 0x9C, 0x8E, 0x69, 0xED) +/* PCI/PCI-X Bus */ +#define CPER_SEC_PCI_X_BUS \ + UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \ + 0xD3, 0xF9, 0xC9, 0xDD) +/* PCI Component/Device */ +#define CPER_SEC_PCI_DEV \ + UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \ + 0x8B, 0x00, 0x13, 0x26) +#define CPER_SEC_DMAR_GENERIC \ + UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \ + 0xDE, 0x3E, 0x2C, 0x64) +/* Intel VT for Directed I/O specific DMAr */ +#define CPER_SEC_DMAR_VT \ + UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \ + 0xDD, 0x93, 0xE8, 0xCF) +/* IOMMU specific DMAr */ +#define CPER_SEC_DMAR_IOMMU \ + UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ + 0xDF, 0xAA, 0x84, 0xEC) + +/* + * All tables and structs must be byte-packed to match CPER + * specification, since the tables are provided by the system BIOS + */ +#pragma pack(1) + +struct cper_record_header { + char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ + __u16 revision; /* must be CPER_RECORD_REV */ + __u32 signature_end; /* must be CPER_SIG_END */ + __u16 section_count; + __u32 error_severity; + __u32 validation_bits; + __u32 record_length; + __u64 timestamp; + uuid_le platform_id; + uuid_le partition_id; + uuid_le creator_id; + uuid_le notification_type; + __u64 record_id; + __u32 flags; + __u64 persistence_information; + __u8 reserved[12]; /* must be zero */ +}; + +struct cper_section_descriptor { + __u32 section_offset; /* Offset in bytes of the + * section body from the base + * of the record header */ + __u32 section_length; + __u16 revision; /* must be CPER_RECORD_REV */ + __u8 validation_bits; + __u8 reserved; /* must be zero */ + __u32 flags; + uuid_le section_type; + uuid_le fru_id; + __u32 section_severity; + __u8 fru_text[20]; +}; + +/* Generic Processor Error Section */ +struct cper_sec_proc_generic { + __u64 validation_bits; + __u8 proc_type; + __u8 proc_isa; + __u8 proc_error_type; + __u8 operation; + __u8 flags; + __u8 level; + __u16 reserved; + __u64 cpu_version; + char cpu_brand[128]; + __u64 proc_id; + __u64 target_addr; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Error Section */ +struct cper_sec_proc_ia { + __u64 validation_bits; + __u8 lapic_id; + __u8 cpuid[48]; +}; + +/* IA32/X64 Processor Error Infomation Structure */ +struct cper_ia_err_info { + uuid_le err_type; + __u64 validation_bits; + __u64 check_info; + __u64 
target_id; + __u64 requestor_id; + __u64 responder_id; + __u64 ip; +}; + +/* IA32/X64 Processor Context Information Structure */ +struct cper_ia_proc_ctx { + __u16 reg_ctx_type; + __u16 reg_arr_size; + __u32 msr_addr; + __u64 mm_reg_addr; +}; + +/* Memory Error Section */ +struct cper_sec_mem_err { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; +}; + +/* Reset to default packing */ +#pragma pack() + +u64 cper_next_record_id(void); + +#endif diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index dcf77fa826b5..55215cce5005 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -125,6 +125,7 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern int cpuidle_register_driver(struct cpuidle_driver *drv); +struct cpuidle_driver *cpuidle_get_driver(void); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); extern int cpuidle_register_device(struct cpuidle_device *dev); extern void cpuidle_unregister_device(struct cpuidle_device *dev); @@ -137,16 +138,17 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); #else static inline int cpuidle_register_driver(struct cpuidle_driver *drv) -{return 0;} +{return -ENODEV; } +static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; } static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } static inline int cpuidle_register_device(struct cpuidle_device *dev) -{return 0;} +{return -ENODEV; } static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } static inline void cpuidle_pause_and_lock(void) { } static inline void cpuidle_resume_and_unlock(void) { } static inline int cpuidle_enable_device(struct cpuidle_device *dev) -{return 0;} +{return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } #endif diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 20b51cab6593..457ed765a116 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -69,6 +69,7 @@ extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); extern int cpuset_mem_spread_node(void); +extern int cpuset_slab_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { @@ -194,6 +195,11 @@ static inline int cpuset_mem_spread_node(void) return 0; } +static inline int cpuset_slab_spread_node(void) +{ + return 0; +} + static inline int cpuset_do_page_mem_spread(void) { return 0; diff --git a/include/linux/cred.h b/include/linux/cred.h index 52507c3e1387..75c0fa881308 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -156,7 +156,6 @@ extern int copy_creds(struct task_struct *, unsigned long); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); -extern struct cred *prepare_usermodehelper_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); extern const struct cred *override_creds(const struct cred *); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index fc1b930f246c..e7d9b20ddc5b 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -63,6 +63,8 @@ struct dentry *debugfs_create_x16(const char *name, mode_t mode, struct dentry *parent, u16 *value); struct dentry *debugfs_create_x32(const 
char *name, mode_t mode, struct dentry *parent, u32 *value); +struct dentry *debugfs_create_x64(const char *name, mode_t mode, + struct dentry *parent, u64 *value); struct dentry *debugfs_create_size_t(const char *name, mode_t mode, struct dentry *parent, size_t *value); struct dentry *debugfs_create_bool(const char *name, mode_t mode, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ca32ed78b057..89b7e1a605b8 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -40,16 +40,6 @@ struct dma_map_ops { void (*sync_single_for_device)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); - void (*sync_single_range_for_cpu)(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir); - void (*sync_single_range_for_device)(struct device *dev, - dma_addr_t dma_handle, - unsigned long offset, - size_t size, - enum dma_data_direction dir); void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); @@ -105,21 +95,6 @@ static inline int is_device_dma_capable(struct device *dev) #include #endif -/* for backwards compatibility, removed soon */ -static inline void __deprecated dma_sync_single(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir) -{ - dma_sync_single_for_cpu(dev, addr, size, dir); -} - -static inline void __deprecated dma_sync_sg(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction dir) -{ - dma_sync_sg_for_cpu(dev, sg, nelems, dir); -} - static inline u64 dma_get_mask(struct device *dev) { if (dev && dev->dma_mask && *dev->dma_mask) diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 5f494b465097..7fc62d4550b2 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -868,7 +868,7 @@ extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, extern void ext3_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ -extern int ext3_sync_file (struct file *, struct dentry *, int); +extern int ext3_sync_file(struct file *, int); /* hash.c */ extern int ext3fs_dirhash(const char *name, int len, struct diff --git a/include/linux/fb.h b/include/linux/fb.h index f3793ebc241c..907ace3a64c8 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -4,8 +4,6 @@ #include #include -struct dentry; - /* Definitions of frame buffers */ #define FB_MAX 32 /* sufficient for now */ @@ -1017,8 +1015,7 @@ extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); -extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, - int datasync); +extern int fb_deferred_io_fsync(struct file *file, int datasync); static inline bool fb_be_math(struct fb_info *info) { diff --git a/include/linux/file.h b/include/linux/file.h index 5555508fd517..b1e12970f617 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -11,7 +11,6 @@ struct file; -extern void __fput(struct file *); extern void fput(struct file *); extern void drop_file_write_access(struct file *file); diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 4bd94bf5e739..72e2b8ac2a5a 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -55,13 +55,11 @@ #define CSR_DESCRIPTOR 0x01 #define CSR_VENDOR 0x03 #define CSR_HARDWARE_VERSION 0x04 -#define CSR_NODE_CAPABILITIES 0x0c #define CSR_UNIT 0x11 #define CSR_SPECIFIER_ID 
0x12 #define CSR_VERSION 0x13 #define CSR_DEPENDENT_INFO 0x14 #define CSR_MODEL 0x17 -#define CSR_INSTANCE 0x18 #define CSR_DIRECTORY_ID 0x20 struct fw_csr_iterator { @@ -89,7 +87,6 @@ struct fw_card { int current_tlabel; u64 tlabel_mask; struct list_head transaction_list; - struct timer_list flush_timer; unsigned long reset_jiffies; unsigned long long guid; @@ -290,6 +287,8 @@ struct fw_transaction { int tlabel; int timestamp; struct list_head link; + struct fw_card *card; + struct timer_list split_timeout_timer; struct fw_packet packet; diff --git a/include/linux/fs.h b/include/linux/fs.h index b336cb9ca9a0..3428393942a6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -954,6 +954,7 @@ extern spinlock_t files_lock; #define file_list_unlock() spin_unlock(&files_lock); #define get_file(x) atomic_long_inc(&(x)->f_count) +#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) #define file_count(x) atomic_long_read(&(x)->f_count) #ifdef CONFIG_DEBUG_WRITECOUNT @@ -1497,7 +1498,7 @@ struct file_operations { int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, struct dentry *, int datasync); + int (*fsync) (struct file *, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); @@ -2212,7 +2213,7 @@ extern int generic_segment_checks(const struct iovec *iov, /* fs/block_dev.c */ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); -extern int blkdev_fsync(struct file *filp, struct dentry *dentry, int datasync); +extern int blkdev_fsync(struct file *filp, int datasync); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -2228,6 +2229,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); +extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); extern loff_t no_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset, @@ -2250,10 +2252,19 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from) #endif #ifdef CONFIG_BLOCK +struct bio; +typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, + loff_t file_offset); +void dio_end_io(struct bio *bio, int error); + +ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode, + struct block_device *bdev, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, + dio_submit_t submit_io, int lock_type); ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, - int lock_type); + dio_submit_t submit_io, int lock_type); enum { /* need locking between buffered and direct access */ @@ -2263,13 +2274,31 @@ enum { DIO_SKIP_HOLES = 0x02, }; +static inline ssize_t blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, + struct inode *inode, struct block_device *bdev, const struct iovec *iov, + loff_t offset, unsigned long nr_segs, get_block_t get_block, + dio_iodone_t end_io) 
+{ + return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset, + nr_segs, get_block, end_io, NULL, + DIO_LOCKING | DIO_SKIP_HOLES); +} + +static inline ssize_t blockdev_direct_IO_no_locking_newtrunc(int rw, struct kiocb *iocb, + struct inode *inode, struct block_device *bdev, const struct iovec *iov, + loff_t offset, unsigned long nr_segs, get_block_t get_block, + dio_iodone_t end_io) +{ + return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset, + nr_segs, get_block, end_io, NULL, 0); +} static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_block, end_io, + nr_segs, get_block, end_io, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } @@ -2279,7 +2308,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, dio_iodone_t end_io) { return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_block, end_io, 0); + nr_segs, get_block, end_io, NULL, 0); } #endif @@ -2335,13 +2364,15 @@ extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); extern loff_t dcache_dir_lseek(struct file *, loff_t, int); extern int dcache_readdir(struct file *, void *, filldir_t); +extern int simple_setattr(struct dentry *, struct iattr *); extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int simple_statfs(struct dentry *, struct kstatfs *); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -extern int simple_sync_file(struct file *, struct dentry *, int); +extern int simple_setsize(struct inode *, loff_t); +extern int noop_fsync(struct file *, int); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, @@ -2366,7 +2397,7 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count); -extern int simple_fsync(struct file *, struct dentry *, int); +extern int generic_file_fsync(struct file *, int); #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, @@ -2377,7 +2408,8 @@ extern int buffer_migrate_page(struct address_space *, extern int inode_change_ok(const struct inode *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); -extern int __must_check inode_setattr(struct inode *, struct iattr *); +extern int __must_check inode_setattr(struct inode *, const struct iattr *); +extern void generic_setattr(struct inode *inode, const struct iattr *attr); extern void file_update_time(struct file *file); diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index c082f223e2fe..3167f2df4126 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -73,18 +73,25 @@ struct trace_iterator { }; +struct trace_event; + typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, - int flags); -struct trace_event { - struct 
hlist_node node; - struct list_head list; - int type; + int flags, struct trace_event *event); + +struct trace_event_functions { trace_print_func trace; trace_print_func raw; trace_print_func hex; trace_print_func binary; }; +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + struct trace_event_functions *funcs; +}; + extern int register_ftrace_event(struct trace_event *event); extern int unregister_ftrace_event(struct trace_event *event); @@ -116,28 +123,70 @@ void tracing_record_cmdline(struct task_struct *tsk); struct event_filter; +enum trace_reg { + TRACE_REG_REGISTER, + TRACE_REG_UNREGISTER, + TRACE_REG_PERF_REGISTER, + TRACE_REG_PERF_UNREGISTER, +}; + +struct ftrace_event_call; + +struct ftrace_event_class { + char *system; + void *probe; +#ifdef CONFIG_PERF_EVENTS + void *perf_probe; +#endif + int (*reg)(struct ftrace_event_call *event, + enum trace_reg type); + int (*define_fields)(struct ftrace_event_call *); + struct list_head *(*get_fields)(struct ftrace_event_call *); + struct list_head fields; + int (*raw_init)(struct ftrace_event_call *); +}; + +enum { + TRACE_EVENT_FL_ENABLED_BIT, + TRACE_EVENT_FL_FILTERED_BIT, +}; + +enum { + TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), + TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), +}; + struct ftrace_event_call { struct list_head list; + struct ftrace_event_class *class; char *name; - char *system; struct dentry *dir; - struct trace_event *event; - int enabled; - int (*regfunc)(struct ftrace_event_call *); - void (*unregfunc)(struct ftrace_event_call *); - int id; + struct trace_event event; const char *print_fmt; - int (*raw_init)(struct ftrace_event_call *); - int (*define_fields)(struct ftrace_event_call *); - struct list_head fields; - int filter_active; struct event_filter *filter; void *mod; void *data; + /* + * 32 bit flags: + * bit 1: enabled + * bit 2: filter_active + * + * Changes to flags must hold the event_mutex. + * + * Note: Reads of flags do not hold the event_mutex since + * they occur in critical sections. But the way flags + * is currently used, these changes do no affect the code + * except that when a change is made, it may have a slight + * delay in propagating the changes to other CPUs due to + * caching and such. 
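As the comment above explains, the old 'enabled' and 'filter_active' ints are folded into a single flags word. A purely illustrative reader-side test; real tracer code also follows the event_mutex rules described above:

static inline int example_event_needs_filtering(struct ftrace_event_call *call)
{
        /* equivalent of the old call->enabled && call->filter_active check */
        return (call->flags & TRACE_EVENT_FL_ENABLED) &&
               (call->flags & TRACE_EVENT_FL_FILTERED);
}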
+ */ + unsigned int flags; + +#ifdef CONFIG_PERF_EVENTS int perf_refcount; - int (*perf_event_enable)(struct ftrace_event_call *); - void (*perf_event_disable)(struct ftrace_event_call *); + struct hlist_head *perf_events; +#endif }; #define PERF_MAX_TRACE_SIZE 2048 @@ -194,24 +243,22 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int perf_trace_enable(int event_id); -extern void perf_trace_disable(int event_id); -extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, +extern int perf_trace_init(struct perf_event *event); +extern void perf_trace_destroy(struct perf_event *event); +extern int perf_trace_enable(struct perf_event *event); +extern void perf_trace_disable(struct perf_event *event); +extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); -extern void * -perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, - unsigned long *irq_flags); +extern void *perf_trace_buf_prepare(int size, unsigned short type, + struct pt_regs *regs, int *rctxp); static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags, struct pt_regs *regs) + u64 count, struct pt_regs *regs, void *head) { - struct trace_entry *entry = raw_data; - - perf_tp_event(entry->type, addr, count, raw_data, size, regs); + perf_tp_event(addr, count, raw_data, size, regs, head); perf_swevent_put_recursion_context(rctx); - local_irq_restore(irq_flags); } #endif diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 3e2925a34bf0..88e0eb596919 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -34,6 +34,9 @@ * 7.13 * - make max number of background requests and congestion threshold * tunables + * + * 7.14 + * - add splice support to fuse device */ #ifndef _LINUX_FUSE_H @@ -65,7 +68,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 13 +#define FUSE_KERNEL_MINOR_VERSION 14 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 4e949a5b5b85..03f616b78cfa 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -51,6 +51,11 @@ static inline int gpio_direction_output(unsigned gpio, int value) return -ENOSYS; } +static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) +{ + return -ENOSYS; +} + static inline int gpio_get_value(unsigned gpio) { /* GPIO can never have been requested or set as {in,out}put */ diff --git a/include/linux/i2c/adp8860.h b/include/linux/i2c/adp8860.h new file mode 100644 index 000000000000..0b4d39855c91 --- /dev/null +++ b/include/linux/i2c/adp8860.h @@ -0,0 +1,154 @@ +/* + * Definitions and platform data for Analog Devices + * Backlight drivers ADP8860 + * + * Copyright 2009-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef __LINUX_I2C_ADP8860_H +#define __LINUX_I2C_ADP8860_H + +#include +#include + +#define ID_ADP8860 8860 + +#define ADP8860_MAX_BRIGHTNESS 0x7F +#define FLAG_OFFT_SHIFT 8 + +/* + * LEDs subdevice platform data + */ + +#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT) +#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT) + +#define ADP8860_LED_ONT_200ms 0 +#define ADP8860_LED_ONT_600ms 1 +#define ADP8860_LED_ONT_800ms 2 +#define ADP8860_LED_ONT_1200ms 3 + +#define ADP8860_LED_D7 (7) +#define ADP8860_LED_D6 (6) +#define ADP8860_LED_D5 (5) +#define ADP8860_LED_D4 (4) +#define ADP8860_LED_D3 (3) +#define ADP8860_LED_D2 (2) +#define ADP8860_LED_D1 (1) + +/* + * Backlight subdevice platform data + */ + +#define ADP8860_BL_D7 (1 << 6) +#define ADP8860_BL_D6 (1 << 5) +#define ADP8860_BL_D5 (1 << 4) +#define ADP8860_BL_D4 (1 << 3) +#define ADP8860_BL_D3 (1 << 2) +#define ADP8860_BL_D2 (1 << 1) +#define ADP8860_BL_D1 (1 << 0) + +#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */ +#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */ +#define ADP8860_FADE_T_600ms 2 +#define ADP8860_FADE_T_900ms 3 +#define ADP8860_FADE_T_1200ms 4 +#define ADP8860_FADE_T_1500ms 5 +#define ADP8860_FADE_T_1800ms 6 +#define ADP8860_FADE_T_2100ms 7 +#define ADP8860_FADE_T_2400ms 8 +#define ADP8860_FADE_T_2700ms 9 +#define ADP8860_FADE_T_3000ms 10 +#define ADP8860_FADE_T_3500ms 11 +#define ADP8860_FADE_T_4000ms 12 +#define ADP8860_FADE_T_4500ms 13 +#define ADP8860_FADE_T_5000ms 14 +#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */ + +#define ADP8860_FADE_LAW_LINEAR 0 +#define ADP8860_FADE_LAW_SQUARE 1 +#define ADP8860_FADE_LAW_CUBIC1 2 +#define ADP8860_FADE_LAW_CUBIC2 3 + +#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */ +#define ADP8860_BL_AMBL_FILT_160ms 1 +#define ADP8860_BL_AMBL_FILT_320ms 2 +#define ADP8860_BL_AMBL_FILT_640ms 3 +#define ADP8860_BL_AMBL_FILT_1280ms 4 +#define ADP8860_BL_AMBL_FILT_2560ms 5 +#define ADP8860_BL_AMBL_FILT_5120ms 6 +#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */ + +/* + * Blacklight current 0..30mA + */ +#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30) + +/* + * L2 comparator current 0..1106uA + */ +#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106) + +/* + * L3 comparator current 0..138uA + */ +#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138) + +struct adp8860_backlight_platform_data { + u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */ + + u8 bl_fade_in; /* Backlight Fade-In Timer */ + u8 bl_fade_out; /* Backlight Fade-Out Timer */ + u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */ + + u8 en_ambl_sens; /* 1 = enable ambient light sensor */ + u8 abml_filt; /* Light sensor filter time */ + + u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */ + + /** + * Independent Current Sinks / LEDS + * Sinks not assigned to the Backlight can be exposed to + * user space 
using the LEDS CLASS interface + */ + + int num_leds; + struct led_info *leds; + u8 led_fade_in; /* LED Fade-In Timer */ + u8 led_fade_out; /* LED Fade-Out Timer */ + u8 led_fade_law; /* fade-on/fade-off transfer characteristic */ + u8 led_on_time; + + /** + * Gain down disable. Setting this option does not allow the + * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860 + * 1 = the charge pump doesn't switch down in gain until all LEDs are 0. + * The charge pump switches up in gain as needed. This feature is + * useful if the ADP8863 charge pump is used to drive an external load. + * This feature must be used when utilizing small fly capacitors + * (0402 or smaller). + * 0 = the charge pump automatically switches up and down in gain. + * This provides optimal efficiency, but is not suitable for driving + * loads that are not connected through the ADP8863 diode drivers. + * Additionally, the charge pump fly capacitors should be low ESR + * and sized 0603 or greater. + */ + + u8 gdwn_dis; +}; + +#endif /* __LINUX_I2C_ADP8860_H */ diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h index e10336631c62..c04bac8bf2fe 100644 --- a/include/linux/i2c/max732x.h +++ b/include/linux/i2c/max732x.h @@ -7,6 +7,9 @@ struct max732x_platform_data { /* number of the first GPIO */ unsigned gpio_base; + /* interrupt base */ + int irq_base; + void *context; /* param to setup/teardown */ int (*setup)(struct i2c_client *client, diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h index d5c5a60c8a0b..139ba52667c8 100644 --- a/include/linux/i2c/pca953x.h +++ b/include/linux/i2c/pca953x.h @@ -24,7 +24,7 @@ struct pca953x_platform_data { int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); - char **names; + const char *const *names; }; #endif /* _LINUX_PCA953X_H */ diff --git a/include/linux/init.h b/include/linux/init.h index ab1d31f9352b..de994304e0bb 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline); #endif /* Data marked not to be saved by software suspend */ -#define __nosavedata __section(.data.nosave) +#define __nosavedata __section(.data..nosave) /* This means "can be init if no module support, otherwise module load may call it." 
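A board-file sketch for the ADP8860 platform data defined above; every tuning value is illustrative rather than a recommendation, and only macros and fields from the header are used:

#include <linux/i2c/adp8860.h>

static struct adp8860_backlight_platform_data example_adp8860_pdata = {
        .bl_led_assign   = ADP8860_BL_D7 | ADP8860_BL_D6, /* D6/D7 drive the backlight */
        .bl_fade_in      = ADP8860_FADE_T_600ms,
        .bl_fade_out     = ADP8860_FADE_T_600ms,
        .bl_fade_law     = ADP8860_FADE_LAW_LINEAR,

        .en_ambl_sens    = 1,                              /* use the ambient light sensor */
        .abml_filt       = ADP8860_BL_AMBL_FILT_640ms,

        .l1_daylight_max = ADP8860_BL_CUR_mA(20),          /* 20 mA in daylight */
        .l2_office_max   = ADP8860_BL_CUR_mA(10),
        .l3_dark_max     = ADP8860_BL_CUR_mA(5),
        .l2_trip         = ADP8860_L2_COMP_CURR_uA(700),
        .l3_trip         = ADP8860_L3_COMP_CURR_uA(100),
};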
*/ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 7996fc2c9ba9..1f43fa56f600 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -16,7 +16,7 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; #define INIT_SIGNALS(sig) { \ - .count = ATOMIC_INIT(1), \ + .nr_threads = 1, \ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ .shared_pending = { \ .list = LIST_HEAD_INIT(sig.shared_pending.list), \ @@ -35,7 +35,7 @@ extern struct nsproxy init_nsproxy; #define INIT_SIGHAND(sighand) { \ .count = ATOMIC_INIT(1), \ - .action = { { { .sa_handler = NULL, } }, }, \ + .action = { { { .sa_handler = SIG_DFL, } }, }, \ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \ } @@ -45,9 +45,9 @@ extern struct group_info init_groups; #define INIT_STRUCT_PID { \ .count = ATOMIC_INIT(1), \ .tasks = { \ - { .first = &init_task.pids[PIDTYPE_PID].node }, \ - { .first = &init_task.pids[PIDTYPE_PGID].node }, \ - { .first = &init_task.pids[PIDTYPE_SID].node }, \ + { .first = NULL }, \ + { .first = NULL }, \ + { .first = NULL }, \ }, \ .level = 0, \ .numbers = { { \ @@ -61,7 +61,7 @@ extern struct group_info init_groups; { \ .node = { \ .next = NULL, \ - .pprev = &init_struct_pid.tasks[type].first, \ + .pprev = NULL, \ }, \ .pid = &init_struct_pid, \ } @@ -163,6 +163,7 @@ extern struct cred init_cred; [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ }, \ + .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ INIT_PERF_EVENTS(tsk) \ @@ -182,7 +183,7 @@ extern struct cred init_cred; } /* Attach to the init_task data structure for proper alignment */ -#define __init_task_data __attribute__((__section__(".data.init_task"))) +#define __init_task_data __attribute__((__section__(".data..init_task"))) #endif diff --git a/include/linux/input.h b/include/linux/input.h index 83524e4f3290..6fcc9101beeb 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -1155,7 +1155,7 @@ struct input_dev { int sync; - int abs[ABS_MAX + 1]; + int abs[ABS_CNT]; int rep[REP_MAX + 1]; unsigned long key[BITS_TO_LONGS(KEY_CNT)]; @@ -1163,11 +1163,11 @@ struct input_dev { unsigned long snd[BITS_TO_LONGS(SND_CNT)]; unsigned long sw[BITS_TO_LONGS(SW_CNT)]; - int absmax[ABS_MAX + 1]; - int absmin[ABS_MAX + 1]; - int absfuzz[ABS_MAX + 1]; - int absflat[ABS_MAX + 1]; - int absres[ABS_MAX + 1]; + int absmax[ABS_CNT]; + int absmin[ABS_CNT]; + int absfuzz[ABS_CNT]; + int absflat[ABS_CNT]; + int absres[ABS_CNT]; int (*open)(struct input_dev *dev); void (*close)(struct input_dev *dev); diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h new file mode 100644 index 000000000000..ab1440313924 --- /dev/null +++ b/include/linux/input/tps6507x-ts.h @@ -0,0 +1,24 @@ +/* linux/i2c/tps6507x-ts.h + * + * Functions to access TPS65070 touch screen chip. 
+ * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_I2C_TPS6507X_TS_H +#define __LINUX_I2C_TPS6507X_TS_H + +/* Board specific touch screen initial values */ +struct touchscreen_init_data { + int poll_period; /* ms */ + int vref; /* non-zero to leave vref on */ + __u16 min_pressure; /* min reading to be treated as a touch */ + __u16 vendor; + __u16 product; + __u16 version; +}; + +#endif /* __LINUX_I2C_TPS6507X_TS_H */ diff --git a/include/linux/joystick.h b/include/linux/joystick.h index 9e20c29c1e14..47199b13e0eb 100644 --- a/include/linux/joystick.h +++ b/include/linux/joystick.h @@ -64,8 +64,8 @@ struct js_event { #define JSIOCSCORR _IOW('j', 0x21, struct js_corr) /* set correction values */ #define JSIOCGCORR _IOR('j', 0x22, struct js_corr) /* get correction values */ -#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_MAX + 1]) /* set axis mapping */ -#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_MAX + 1]) /* get axis mapping */ +#define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_CNT]) /* set axis mapping */ +#define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_CNT]) /* get axis mapping */ #define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */ #define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */ diff --git a/include/linux/kmod.h b/include/linux/kmod.h index facb27fe7de0..6efd7a78de6a 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -23,6 +23,7 @@ #include #include #include +#include #define KMOD_PATH_LEN 256 @@ -45,19 +46,6 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; struct key; struct file; -struct subprocess_info; - -/* Allocate a subprocess_info structure */ -struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, - char **envp, gfp_t gfp_mask); - -/* Set various pieces of state into the subprocess_info structure */ -void call_usermodehelper_setkeys(struct subprocess_info *info, - struct key *session_keyring); -int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info, - struct file **filp); -void call_usermodehelper_setcleanup(struct subprocess_info *info, - void (*cleanup)(char **argv, char **envp)); enum umh_wait { UMH_NO_WAIT = -1, /* don't wait at all */ @@ -65,6 +53,29 @@ enum umh_wait { UMH_WAIT_PROC = 1, /* wait for the process to complete */ }; +struct subprocess_info { + struct work_struct work; + struct completion *complete; + char *path; + char **argv; + char **envp; + enum umh_wait wait; + int retval; + int (*init)(struct subprocess_info *info); + void (*cleanup)(struct subprocess_info *info); + void *data; +}; + +/* Allocate a subprocess_info structure */ +struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, + char **envp, gfp_t gfp_mask); + +/* Set various pieces of state into the subprocess_info structure */ +void call_usermodehelper_setfns(struct subprocess_info *info, + int (*init)(struct subprocess_info *info), + void (*cleanup)(struct subprocess_info *info), + void *data); + /* Actually execute the sub-process */ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); @@ -73,38 +84,33 @@ int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait); void call_usermodehelper_freeinfo(struct subprocess_info *info); static inline int -call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) +call_usermodehelper_fns(char *path, char **argv, char **envp, 
+ enum umh_wait wait, + int (*init)(struct subprocess_info *info), + void (*cleanup)(struct subprocess_info *), void *data) { struct subprocess_info *info; gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; info = call_usermodehelper_setup(path, argv, envp, gfp_mask); + if (info == NULL) return -ENOMEM; + + call_usermodehelper_setfns(info, init, cleanup, data); + return call_usermodehelper_exec(info, wait); } static inline int -call_usermodehelper_keys(char *path, char **argv, char **envp, - struct key *session_keyring, enum umh_wait wait) +call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait) { - struct subprocess_info *info; - gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; - - info = call_usermodehelper_setup(path, argv, envp, gfp_mask); - if (info == NULL) - return -ENOMEM; - - call_usermodehelper_setkeys(info, session_keyring); - return call_usermodehelper_exec(info, wait); + return call_usermodehelper_fns(path, argv, envp, wait, + NULL, NULL, NULL); } extern void usermodehelper_init(void); -struct file; -extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[], - struct file **filp); - extern int usermodehelper_disable(void); extern void usermodehelper_enable(void); diff --git a/include/linux/lcd.h b/include/linux/lcd.h index c67fecafff90..8877123f2d6e 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -69,6 +69,29 @@ struct lcd_device { struct device dev; }; +struct lcd_platform_data { + /* reset lcd panel device. */ + int (*reset)(struct lcd_device *ld); + /* on or off to lcd panel. if 'enable' is 0 then + lcd power off and 1, lcd power on. */ + int (*power_on)(struct lcd_device *ld, int enable); + + /* it indicates whether lcd panel was enabled + from bootloader or not. */ + int lcd_enabled; + /* it means delay for stable time when it becomes low to high + or high to low that is dependent on whether reset gpio is + low active or high active. */ + unsigned int reset_delay; + /* stable time needing to become lcd power on. */ + unsigned int power_on_delay; + /* stable time needing to become lcd power off. */ + unsigned int power_off_delay; + + /* it could be used for any purpose. 
*/ + void *pdata; +}; + static inline void lcd_set_power(struct lcd_device *ld, int power) { mutex_lock(&ld->update_lock); diff --git a/include/linux/leds.h b/include/linux/leds.h index d8bf9665e70c..ba6986a11663 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -149,14 +149,18 @@ struct gpio_led { unsigned default_state : 2; /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ }; -#define LEDS_GPIO_DEFSTATE_OFF 0 -#define LEDS_GPIO_DEFSTATE_ON 1 -#define LEDS_GPIO_DEFSTATE_KEEP 2 +#define LEDS_GPIO_DEFSTATE_OFF 0 +#define LEDS_GPIO_DEFSTATE_ON 1 +#define LEDS_GPIO_DEFSTATE_KEEP 2 struct gpio_led_platform_data { int num_leds; struct gpio_led *leds; - int (*gpio_blink_set)(unsigned gpio, + +#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ +#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ +#define GPIO_LED_BLINK 2 /* Plase, blink */ + int (*gpio_blink_set)(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off); }; diff --git a/include/linux/libata.h b/include/linux/libata.h index ee84e7e12039..3bad2701bfa6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -386,6 +386,7 @@ enum { ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ + ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -513,7 +514,9 @@ struct ata_ioports { void __iomem *command_addr; void __iomem *altstatus_addr; void __iomem *ctl_addr; +#ifdef CONFIG_ATA_BMDMA void __iomem *bmdma_addr; +#endif /* CONFIG_ATA_BMDMA */ void __iomem *scr_addr; }; #endif /* CONFIG_ATA_SFF */ @@ -721,8 +724,10 @@ struct ata_port { u8 ctl; /* cache of ATA control register */ u8 last_ctl; /* Cache last written value */ struct delayed_work sff_pio_task; +#ifdef CONFIG_ATA_BMDMA struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ +#endif /* CONFIG_ATA_BMDMA */ #endif /* CONFIG_ATA_SFF */ unsigned int pio_mask; @@ -856,10 +861,12 @@ struct ata_port_operations { void (*sff_irq_clear)(struct ata_port *); void (*sff_drain_fifo)(struct ata_queued_cmd *qc); +#ifdef CONFIG_ATA_BMDMA void (*bmdma_setup)(struct ata_queued_cmd *qc); void (*bmdma_start)(struct ata_queued_cmd *qc); void (*bmdma_stop)(struct ata_queued_cmd *qc); u8 (*bmdma_status)(struct ata_port *ap); +#endif /* CONFIG_ATA_BMDMA */ #endif /* CONFIG_ATA_SFF */ ssize_t (*em_show)(struct ata_port *ap, char *buf); @@ -1555,7 +1562,6 @@ extern void sata_pmp_error_handler(struct ata_port *ap); #ifdef CONFIG_ATA_SFF extern const struct ata_port_operations ata_sff_port_ops; -extern const struct ata_port_operations ata_bmdma_port_ops; extern const struct ata_port_operations ata_bmdma32_port_ops; /* PIO only, sg_tablesize and dma_boundary limits can be removed */ @@ -1564,11 +1570,6 @@ extern const struct ata_port_operations ata_bmdma32_port_ops; .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY -#define ATA_BMDMA_SHT(drv_name) \ - ATA_BASE_SHT(drv_name), \ - .sg_tablesize = LIBATA_MAX_PRD, \ - .dma_boundary = ATA_DMA_BOUNDARY - extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device); extern u8 ata_sff_check_status(struct ata_port *ap); extern void ata_sff_pause(struct ata_port *ap); @@ -1593,7 +1594,7 @@ extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, extern void ata_sff_queue_pio_task(struct 
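The leds.h change a little further up adds a state argument to the gpio_blink_set() board hook together with the GPIO_LED_NO_BLINK_LOW/GPIO_LED_NO_BLINK_HIGH/GPIO_LED_BLINK values it may receive. A rough sketch of a callback handling the new argument; board_program_blink() stands in for whatever blink hardware the board actually has:

    static int board_gpio_blink_set(unsigned gpio, int state,
                                    unsigned long *delay_on,
                                    unsigned long *delay_off)
    {
            switch (state) {
            case GPIO_LED_NO_BLINK_LOW:
                    gpio_set_value(gpio, 0);
                    return 0;
            case GPIO_LED_NO_BLINK_HIGH:
                    gpio_set_value(gpio, 1);
                    return 0;
            case GPIO_LED_BLINK:
                    /* hand *delay_on / *delay_off to the blink hardware */
                    return board_program_blink(gpio, delay_on, delay_off);
            default:
                    return -EINVAL;
            }
    }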
ata_port *ap, unsigned long delay); extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); -extern unsigned int ata_sff_host_intr(struct ata_port *ap, +extern unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); extern void ata_sff_lost_interrupt(struct ata_port *ap); @@ -1625,11 +1626,24 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev, struct scsi_host_template *sht, void *host_priv, int hflags); #endif /* CONFIG_PCI */ +#ifdef CONFIG_ATA_BMDMA + +extern const struct ata_port_operations ata_bmdma_port_ops; + +#define ATA_BMDMA_SHT(drv_name) \ + ATA_BASE_SHT(drv_name), \ + .sg_tablesize = LIBATA_MAX_PRD, \ + .dma_boundary = ATA_DMA_BOUNDARY + extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, + struct ata_queued_cmd *qc); +extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); extern void ata_bmdma_error_handler(struct ata_port *ap); extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); +extern void ata_bmdma_irq_clear(struct ata_port *ap); extern void ata_bmdma_setup(struct ata_queued_cmd *qc); extern void ata_bmdma_start(struct ata_queued_cmd *qc); extern void ata_bmdma_stop(struct ata_queued_cmd *qc); @@ -1640,7 +1654,15 @@ extern int ata_bmdma_port_start32(struct ata_port *ap); #ifdef CONFIG_PCI extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev); extern void ata_pci_bmdma_init(struct ata_host *host); +extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct ata_host **r_host); +extern int ata_pci_bmdma_init_one(struct pci_dev *pdev, + const struct ata_port_info * const * ppi, + struct scsi_host_template *sht, + void *host_priv, int hflags); #endif /* CONFIG_PCI */ +#endif /* CONFIG_ATA_BMDMA */ /** * ata_sff_busy_wait - Wait for a port status register diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 5126cceb6ae9..7135ebc8428c 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -18,8 +18,8 @@ # define asmregparm #endif -#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE) -#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) +#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) /* * For assembly routines. 
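With the libata.h changes above, ata_bmdma_port_ops, ATA_BMDMA_SHT() and the bmdma_* helpers move under CONFIG_ATA_BMDMA, and ata_pci_bmdma_init_one()/ata_pci_bmdma_prepare_host() become the BMDMA counterparts of the SFF init helpers. A sketch of the pattern a BMDMA-capable PCI driver follows; "pata_foo" and the mode masks are placeholders, not a driver in this patch:

    static struct scsi_host_template pata_foo_sht = {
            ATA_BMDMA_SHT("pata_foo"),
    };

    static struct ata_port_operations pata_foo_port_ops = {
            .inherits       = &ata_bmdma_port_ops,
            /* driver-specific hooks would override entries here */
    };

    static int pata_foo_init_one(struct pci_dev *pdev,
                                 const struct pci_device_id *id)
    {
            static const struct ata_port_info info = {
                    .flags          = ATA_FLAG_SLAVE_POSS,
                    .pio_mask       = ATA_PIO4,
                    .mwdma_mask     = ATA_MWDMA2,
                    .udma_mask      = ATA_UDMA5,
                    .port_ops       = &pata_foo_port_ops,
            };
            const struct ata_port_info *ppi[] = { &info, NULL };

            return ata_pci_bmdma_init_one(pdev, ppi, &pata_foo_sht, NULL, 0);
    }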
@@ -27,8 +27,8 @@ * Note when using these that you must specify the appropriate * alignment directives yourself */ -#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw" -#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw" +#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" /* * This is used by architectures to keep arguments on the stack diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 05894795fdc1..9411d32840b0 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -90,7 +90,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); extern int -mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); +mem_cgroup_prepare_migration(struct page *page, + struct page *newpage, struct mem_cgroup **ptr); extern void mem_cgroup_end_migration(struct mem_cgroup *mem, struct page *oldpage, struct page *newpage); @@ -227,7 +228,8 @@ static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) } static inline int -mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) +mem_cgroup_prepare_migration(struct page *page, struct page *newpage, + struct mem_cgroup **ptr) { return 0; } diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h index e3c4ff8c3e38..bfd23bef7363 100644 --- a/include/linux/mfd/88pm860x.h +++ b/include/linux/mfd/88pm860x.h @@ -370,7 +370,7 @@ extern int pm860x_set_bits(struct i2c_client *, int, unsigned char, unsigned char); extern int pm860x_device_init(struct pm860x_chip *chip, - struct pm860x_platform_data *pdata); -extern void pm860x_device_exit(struct pm860x_chip *chip); + struct pm860x_platform_data *pdata) __devinit ; +extern void pm860x_device_exit(struct pm860x_chip *chip) __devexit ; #endif /* __LINUX_MFD_88PM860X_H */ diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h deleted file mode 100644 index a42a7033ae53..000000000000 --- a/include/linux/mfd/ab4500.h +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright (C) 2009 ST-Ericsson - * - * Author: Srinidhi KASAGAR - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, as - * published by the Free Software Foundation. - * - * AB4500 device core funtions, for client access - */ -#ifndef MFD_AB4500_H -#define MFD_AB4500_H - -#include - -/* - * AB4500 bank addresses - */ -#define AB4500_SYS_CTRL1_BLOCK 0x1 -#define AB4500_SYS_CTRL2_BLOCK 0x2 -#define AB4500_REGU_CTRL1 0x3 -#define AB4500_REGU_CTRL2 0x4 -#define AB4500_USB 0x5 -#define AB4500_TVOUT 0x6 -#define AB4500_DBI 0x7 -#define AB4500_ECI_AV_ACC 0x8 -#define AB4500_RESERVED 0x9 -#define AB4500_GPADC 0xA -#define AB4500_CHARGER 0xB -#define AB4500_GAS_GAUGE 0xC -#define AB4500_AUDIO 0xD -#define AB4500_INTERRUPT 0xE -#define AB4500_RTC 0xF -#define AB4500_MISC 0x10 -#define AB4500_DEBUG 0x12 -#define AB4500_PROD_TEST 0x13 -#define AB4500_OTP_EMUL 0x15 - -/* - * System control 1 register offsets. 
- * Bank = 0x01 - */ -#define AB4500_TURNON_STAT_REG 0x0100 -#define AB4500_RESET_STAT_REG 0x0101 -#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102 - -#define AB4500_FSM_STAT1_REG 0x0140 -#define AB4500_FSM_STAT2_REG 0x0141 -#define AB4500_SYSCLK_REQ_STAT_REG 0x0142 -#define AB4500_USB_STAT1_REG 0x0143 -#define AB4500_USB_STAT2_REG 0x0144 -#define AB4500_STATUS_SPARE1_REG 0x0145 -#define AB4500_STATUS_SPARE2_REG 0x0146 - -#define AB4500_CTRL1_REG 0x0180 -#define AB4500_CTRL2_REG 0x0181 - -/* - * System control 2 register offsets. - * bank = 0x02 - */ -#define AB4500_CTRL3_REG 0x0200 -#define AB4500_MAIN_WDOG_CTRL_REG 0x0201 -#define AB4500_MAIN_WDOG_TIMER_REG 0x0202 -#define AB4500_LOW_BAT_REG 0x0203 -#define AB4500_BATT_OK_REG 0x0204 -#define AB4500_SYSCLK_TIMER_REG 0x0205 -#define AB4500_SMPSCLK_CTRL_REG 0x0206 -#define AB4500_SMPSCLK_SEL1_REG 0x0207 -#define AB4500_SMPSCLK_SEL2_REG 0x0208 -#define AB4500_SMPSCLK_SEL3_REG 0x0209 -#define AB4500_SYSULPCLK_CONF_REG 0x020A -#define AB4500_SYSULPCLK_CTRL1_REG 0x020B -#define AB4500_SYSCLK_CTRL_REG 0x020C -#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D -#define AB4500_SYSCLK_REQ_VALID_REG 0x020E -#define AB4500_SYSCTRL_SPARE_REG 0x020F -#define AB4500_PAD_CONF_REG 0x0210 - -/* - * Regu control1 register offsets - * Bank = 0x03 - */ -#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300 -#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301 -#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302 -#define AB4500_REGU_REQ_CTRL1_REG 0x0303 -#define AB4500_REGU_REQ_CTRL2_REG 0x0304 -#define AB4500_REGU_REQ_CTRL3_REG 0x0305 -#define AB4500_REGU_REQ_CTRL4_REG 0x0306 -#define AB4500_REGU_MISC1_REG 0x0380 -#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381 -#define AB4500_REGU_VUSB_CTRL_REG 0x0382 -#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383 -#define AB4500_REGU_CTRL1_SPARE_REG 0x0384 - -/* - * Regu control2 Vmod register offsets - */ -#define AB4500_REGU_VMOD_REGU_REG 0x0440 -#define AB4500_REGU_VMOD_SEL1_REG 0x0441 -#define AB4500_REGU_VMOD_SEL2_REG 0x0442 -#define AB4500_REGU_CTRL_DISCH_REG 0x0443 -#define AB4500_REGU_CTRL_DISCH2_REG 0x0444 - -/* - * USB/ULPI register offsets - * Bank : 0x5 - */ -#define AB4500_USB_LINE_STAT_REG 0x0580 -#define AB4500_USB_LINE_CTRL1_REG 0x0581 -#define AB4500_USB_LINE_CTRL2_REG 0x0582 -#define AB4500_USB_LINE_CTRL3_REG 0x0583 -#define AB4500_USB_LINE_CTRL4_REG 0x0584 -#define AB4500_USB_LINE_CTRL5_REG 0x0585 -#define AB4500_USB_OTG_CTRL_REG 0x0587 -#define AB4500_USB_OTG_STAT_REG 0x0588 -#define AB4500_USB_OTG_STAT_REG 0x0588 -#define AB4500_USB_CTRL_SPARE_REG 0x0589 -#define AB4500_USB_PHY_CTRL_REG 0x058A - -/* - * TVOUT / CTRL register offsets - * Bank : 0x06 - */ -#define AB4500_TVOUT_CTRL_REG 0x0680 - -/* - * DBI register offsets - * Bank : 0x07 - */ -#define AB4500_DBI_REG1_REG 0x0700 -#define AB4500_DBI_REG2_REG 0x0701 - -/* - * ECI regsiter offsets - * Bank : 0x08 - */ -#define AB4500_ECI_CTRL_REG 0x0800 -#define AB4500_ECI_HOOKLEVEL_REG 0x0801 -#define AB4500_ECI_DATAOUT_REG 0x0802 -#define AB4500_ECI_DATAIN_REG 0x0803 - -/* - * AV Connector register offsets - * Bank : 0x08 - */ -#define AB4500_AV_CONN_REG 0x0840 - -/* - * Accessory detection register offsets - * Bank : 0x08 - */ -#define AB4500_ACC_DET_DB1_REG 0x0880 -#define AB4500_ACC_DET_DB2_REG 0x0881 - -/* - * GPADC register offsets - * Bank : 0x0A - */ -#define AB4500_GPADC_CTRL1_REG 0x0A00 -#define AB4500_GPADC_CTRL2_REG 0x0A01 -#define AB4500_GPADC_CTRL3_REG 0x0A02 -#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03 -#define AB4500_GPADC_STAT_REG 0x0A04 -#define AB4500_GPADC_MANDATAL_REG 0x0A05 
-#define AB4500_GPADC_MANDATAH_REG 0x0A06 -#define AB4500_GPADC_AUTODATAL_REG 0x0A07 -#define AB4500_GPADC_AUTODATAH_REG 0x0A08 -#define AB4500_GPADC_MUX_CTRL_REG 0x0A09 - -/* - * Charger / status register offfsets - * Bank : 0x0B - */ -#define AB4500_CH_STATUS1_REG 0x0B00 -#define AB4500_CH_STATUS2_REG 0x0B01 -#define AB4500_CH_USBCH_STAT1_REG 0x0B02 -#define AB4500_CH_USBCH_STAT2_REG 0x0B03 -#define AB4500_CH_FSM_STAT_REG 0x0B04 -#define AB4500_CH_STAT_REG 0x0B05 - -/* - * Charger / control register offfsets - * Bank : 0x0B - */ -#define AB4500_CH_VOLT_LVL_REG 0x0B40 - -/* - * Charger / main control register offfsets - * Bank : 0x0B - */ -#define AB4500_MCH_CTRL1 0x0B80 -#define AB4500_MCH_CTRL2 0x0B81 -#define AB4500_MCH_IPT_CURLVL_REG 0x0B82 -#define AB4500_CH_WD_REG 0x0B83 - -/* - * Charger / USB control register offsets - * Bank : 0x0B - */ -#define AB4500_USBCH_CTRL1_REG 0x0BC0 -#define AB4500_USBCH_CTRL2_REG 0x0BC1 -#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2 - -/* - * RTC bank register offsets - * Bank : 0xF - */ -#define AB4500_RTC_SOFF_STAT_REG 0x0F00 -#define AB4500_RTC_CC_CONF_REG 0x0F01 -#define AB4500_RTC_READ_REQ_REG 0x0F02 -#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03 -#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04 -#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05 -#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06 -#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07 -#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08 -#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09 -#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A -#define AB4500_RTC_STAT_REG 0x0F0B -#define AB4500_RTC_BKUP_CHG_REG 0x0F0C -#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D -#define AB4500_RTC_CALIB_REG 0x0F0E -#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F - -/* - * PWM Out generators - * Bank: 0x10 - */ -#define AB4500_PWM_OUT_CTRL1_REG 0x1060 -#define AB4500_PWM_OUT_CTRL2_REG 0x1061 -#define AB4500_PWM_OUT_CTRL3_REG 0x1062 -#define AB4500_PWM_OUT_CTRL4_REG 0x1063 -#define AB4500_PWM_OUT_CTRL5_REG 0x1064 -#define AB4500_PWM_OUT_CTRL6_REG 0x1065 -#define AB4500_PWM_OUT_CTRL7_REG 0x1066 - -#define AB4500_I2C_PAD_CTRL_REG 0x1067 -#define AB4500_REV_REG 0x1080 - -/** - * struct ab4500 - * @spi: spi device structure - * @tx_buf: transmit buffer - * @rx_buf: receive buffer - * @lock: sync primitive - */ -struct ab4500 { - struct spi_device *spi; - unsigned long tx_buf[4]; - unsigned long rx_buf[4]; - struct mutex lock; -}; - -int ab4500_write(struct ab4500 *ab4500, unsigned char block, - unsigned long addr, unsigned char data); -int ab4500_read(struct ab4500 *ab4500, unsigned char block, - unsigned long addr); - -#endif /* MFD_AB4500_H */ diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h new file mode 100644 index 000000000000..b63ff3ba3351 --- /dev/null +++ b/include/linux/mfd/ab8500.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Srinidhi Kasagar + */ +#ifndef MFD_AB8500_H +#define MFD_AB8500_H + +#include + +/* + * Interrupts + */ + +#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 +#define AB8500_INT_UN_PLUG_TV_DET 1 +#define AB8500_INT_PLUG_TV_DET 2 +#define AB8500_INT_TEMP_WARM 3 +#define AB8500_INT_PON_KEY2DB_F 4 +#define AB8500_INT_PON_KEY2DB_R 5 +#define AB8500_INT_PON_KEY1DB_F 6 +#define AB8500_INT_PON_KEY1DB_R 7 +#define AB8500_INT_BATT_OVV 8 +#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 +#define AB8500_INT_MAIN_CH_PLUG_DET 11 +#define AB8500_INT_USB_ID_DET_F 12 +#define AB8500_INT_USB_ID_DET_R 13 +#define AB8500_INT_VBUS_DET_F 14 +#define AB8500_INT_VBUS_DET_R 15 +#define 
AB8500_INT_VBUS_CH_DROP_END 16 +#define AB8500_INT_RTC_60S 17 +#define AB8500_INT_RTC_ALARM 18 +#define AB8500_INT_BAT_CTRL_INDB 20 +#define AB8500_INT_CH_WD_EXP 21 +#define AB8500_INT_VBUS_OVV 22 +#define AB8500_INT_MAIN_CH_DROP_END 23 +#define AB8500_INT_CCN_CONV_ACC 24 +#define AB8500_INT_INT_AUD 25 +#define AB8500_INT_CCEOC 26 +#define AB8500_INT_CC_INT_CALIB 27 +#define AB8500_INT_LOW_BAT_F 28 +#define AB8500_INT_LOW_BAT_R 29 +#define AB8500_INT_BUP_CHG_NOT_OK 30 +#define AB8500_INT_BUP_CHG_OK 31 +#define AB8500_INT_GP_HW_ADC_CONV_END 32 +#define AB8500_INT_ACC_DETECT_1DB_F 33 +#define AB8500_INT_ACC_DETECT_1DB_R 34 +#define AB8500_INT_ACC_DETECT_22DB_F 35 +#define AB8500_INT_ACC_DETECT_22DB_R 36 +#define AB8500_INT_ACC_DETECT_21DB_F 37 +#define AB8500_INT_ACC_DETECT_21DB_R 38 +#define AB8500_INT_GP_SW_ADC_CONV_END 39 +#define AB8500_INT_BTEMP_LOW 72 +#define AB8500_INT_BTEMP_LOW_MEDIUM 73 +#define AB8500_INT_BTEMP_MEDIUM_HIGH 74 +#define AB8500_INT_BTEMP_HIGH 75 +#define AB8500_INT_USB_CHARGER_NOT_OK 81 +#define AB8500_INT_ID_WAKEUP_R 82 +#define AB8500_INT_ID_DET_R1R 84 +#define AB8500_INT_ID_DET_R2R 85 +#define AB8500_INT_ID_DET_R3R 86 +#define AB8500_INT_ID_DET_R4R 87 +#define AB8500_INT_ID_WAKEUP_F 88 +#define AB8500_INT_ID_DET_R1F 90 +#define AB8500_INT_ID_DET_R2F 91 +#define AB8500_INT_ID_DET_R3F 92 +#define AB8500_INT_ID_DET_R4F 93 +#define AB8500_INT_USB_CHG_DET_DONE 94 +#define AB8500_INT_USB_CH_TH_PROT_F 96 +#define AB8500_INT_USB_CH_TH_PROP_R 97 +#define AB8500_INT_MAIN_CH_TH_PROP_F 98 +#define AB8500_INT_MAIN_CH_TH_PROT_R 99 +#define AB8500_INT_USB_CHARGER_NOT_OKF 103 + +#define AB8500_NR_IRQS 104 +#define AB8500_NUM_IRQ_REGS 13 + +/** + * struct ab8500 - ab8500 internal structure + * @dev: parent device + * @lock: read/write operations lock + * @irq_lock: genirq bus lock + * @revision: chip revision + * @irq: irq line + * @write: register write + * @read: register read + * @rx_buf: rx buf for SPI + * @tx_buf: tx buf for SPI + * @mask: cache of IRQ regs for bus lock + * @oldmask: cache of previous IRQ regs for bus lock + */ +struct ab8500 { + struct device *dev; + struct mutex lock; + struct mutex irq_lock; + int revision; + int irq_base; + int irq; + + int (*write) (struct ab8500 *a8500, u16 addr, u8 data); + int (*read) (struct ab8500 *a8500, u16 addr); + + unsigned long tx_buf[4]; + unsigned long rx_buf[4]; + + u8 mask[AB8500_NUM_IRQ_REGS]; + u8 oldmask[AB8500_NUM_IRQ_REGS]; +}; + +/** + * struct ab8500_platform_data - AB8500 platform data + * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used + * @init: board-specific initialization after detection of ab8500 + */ +struct ab8500_platform_data { + int irq_base; + void (*init) (struct ab8500 *); +}; + +extern int ab8500_write(struct ab8500 *a8500, u16 addr, u8 data); +extern int ab8500_read(struct ab8500 *a8500, u16 addr); +extern int ab8500_set_bits(struct ab8500 *a8500, u16 addr, u8 mask, u8 data); + +extern int __devinit ab8500_init(struct ab8500 *ab8500); +extern int __devexit ab8500_exit(struct ab8500 *ab8500); + +#endif /* MFD_AB8500_H */ diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/abx500.h similarity index 51% rename from include/linux/mfd/ab3100.h rename to include/linux/mfd/abx500.h index 9a881c305a50..390726fcbcb1 100644 --- a/include/linux/mfd/ab3100.h +++ b/include/linux/mfd/abx500.h @@ -3,17 +3,37 @@ * License terms: GNU General Public License (GPL) version 2 * AB3100 core access functions * Author: Linus Walleij + * + * ABX500 core access functions. 
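The new ab8500.h above gives client code the ab8500_read()/ab8500_write()/ab8500_set_bits() accessors and lets the board pass an IRQ base and an init hook through ab8500_platform_data. A sketch of the board side; IRQ_BOARD_AB8500_BASE and the init body are illustrative:

    static void board_ab8500_init(struct ab8500 *ab8500)
    {
            /* board-specific setup once the chip is detected; the
             * exported ab8500_set_bits()/ab8500_write() accessors can
             * be used from here */
    }

    static struct ab8500_platform_data board_ab8500_pdata = {
            .irq_base = IRQ_BOARD_AB8500_BASE, /* AB8500_NR_IRQS IRQs from here */
            .init     = board_ab8500_init,
    };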
+ * The abx500 interface is used for the Analog Baseband chip + * ab3100, ab3550, ab5500 and possibly comming. It is not used for + * ab4500 and ab8500 since they are another family of chip. + * + * Author: Mattias Wallin + * Author: Mattias Nilsson + * Author: Bengt Jonsson + * Author: Rickard Andersson */ #include #include -#ifndef MFD_AB3100_H -#define MFD_AB3100_H +#ifndef MFD_ABX500_H +#define MFD_ABX500_H -#define ABUNKNOWN 0 -#define AB3000 1 -#define AB3100 2 +#define AB3100_P1A 0xc0 +#define AB3100_P1B 0xc1 +#define AB3100_P1C 0xc2 +#define AB3100_P1D 0xc3 +#define AB3100_P1E 0xc4 +#define AB3100_P1F 0xc5 +#define AB3100_P1G 0xc6 +#define AB3100_R2A 0xc7 +#define AB3100_R2B 0xc8 +#define AB3550_P1A 0x10 +#define AB5500_1_0 0x20 +#define AB5500_2_0 0x21 +#define AB5500_2_1 0x22 /* * AB3100, EVENTA1, A2 and A3 event register flags @@ -89,7 +109,7 @@ struct ab3100 { char chip_name[32]; u8 chip_id; struct blocking_notifier_head event_subscribers; - u32 startup_events; + u8 startup_events[3]; bool startup_events_read; }; @@ -112,18 +132,102 @@ struct ab3100_platform_data { int external_voltage; }; -int ab3100_set_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 regval); -int ab3100_get_register_interruptible(struct ab3100 *ab3100, u8 reg, u8 *regval); -int ab3100_get_register_page_interruptible(struct ab3100 *ab3100, - u8 first_reg, u8 *regvals, u8 numregs); -int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100, - u8 reg, u8 andmask, u8 ormask); -u8 ab3100_get_chip_type(struct ab3100 *ab3100); int ab3100_event_register(struct ab3100 *ab3100, struct notifier_block *nb); int ab3100_event_unregister(struct ab3100 *ab3100, struct notifier_block *nb); -int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, - u32 *fatevent); +/* AB3550, STR register flags */ +#define AB3550_STR_ONSWA (0x01) +#define AB3550_STR_ONSWB (0x02) +#define AB3550_STR_ONSWC (0x04) +#define AB3550_STR_DCIO (0x08) +#define AB3550_STR_BOOT_MODE (0x10) +#define AB3550_STR_SIM_OFF (0x20) +#define AB3550_STR_BATT_REMOVAL (0x40) +#define AB3550_STR_VBUS (0x80) + +/* Interrupt mask registers */ +#define AB3550_IMR1 0x29 +#define AB3550_IMR2 0x2a +#define AB3550_IMR3 0x2b +#define AB3550_IMR4 0x2c +#define AB3550_IMR5 0x2d + +enum ab3550_devid { + AB3550_DEVID_ADC, + AB3550_DEVID_DAC, + AB3550_DEVID_LEDS, + AB3550_DEVID_POWER, + AB3550_DEVID_REGULATORS, + AB3550_DEVID_SIM, + AB3550_DEVID_UART, + AB3550_DEVID_RTC, + AB3550_DEVID_CHARGER, + AB3550_DEVID_FUELGAUGE, + AB3550_DEVID_VIBRATOR, + AB3550_DEVID_CODEC, + AB3550_NUM_DEVICES, +}; + +/** + * struct abx500_init_setting + * Initial value of the registers for driver to use during setup. 
+ */ +struct abx500_init_settings { + u8 bank; + u8 reg; + u8 setting; +}; + +/** + * struct ab3550_platform_data + * Data supplied to initialize board connections to the AB3550 + */ +struct ab3550_platform_data { + struct {unsigned int base; unsigned int count; } irq; + void *dev_data[AB3550_NUM_DEVICES]; + size_t dev_data_sz[AB3550_NUM_DEVICES]; + struct abx500_init_settings *init_settings; + unsigned int init_settings_sz; +}; + +int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 value); +int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg, + u8 *value); +int abx500_get_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +int abx500_set_register_page_interruptible(struct device *dev, u8 bank, + u8 first_reg, u8 *regvals, u8 numregs); +/** + * abx500_mask_and_set_register_inerruptible() - Modifies selected bits of a + * target register + * + * @dev: The AB sub device. + * @bank: The i2c bank number. + * @bitmask: The bit mask to use. + * @bitvalues: The new bit values. + * + * Updates the value of an AB register: + * value -> ((value & ~bitmask) | (bitvalues & bitmask)) + */ +int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank, + u8 reg, u8 bitmask, u8 bitvalues); +int abx500_get_chip_id(struct device *dev); +int abx500_event_registers_startup_state_get(struct device *dev, u8 *event); +int abx500_startup_irq_enabled(struct device *dev, unsigned int irq); + +struct abx500_ops { + int (*get_chip_id) (struct device *); + int (*get_register) (struct device *, u8, u8, u8 *); + int (*set_register) (struct device *, u8, u8, u8); + int (*get_register_page) (struct device *, u8, u8, u8 *, u8); + int (*set_register_page) (struct device *, u8, u8, u8 *, u8); + int (*mask_and_set_register) (struct device *, u8, u8, u8, u8); + int (*event_registers_startup_state_get) (struct device *, u8 *); + int (*startup_irq_enabled) (struct device *, unsigned int); +}; + +int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops); #endif diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h new file mode 100644 index 000000000000..e9994c469803 --- /dev/null +++ b/include/linux/mfd/janz.h @@ -0,0 +1,54 @@ +/* + * Common Definitions for Janz MODULbus devices + * + * Copyright (c) 2010 Ira W. Snyder + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
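The abx500.h interface above decouples sub-drivers from the specific analog-baseband chip: the core driver registers a struct abx500_ops with abx500_register_ops(), and clients call the abx500_*_interruptible() wrappers with nothing but their struct device. A sketch of a client-side update; BANK and REG are placeholders for real bank/register numbers:

    static int client_enable_feature(struct device *dev)
    {
            /* set bit 0 of REG in BANK, leaving the other bits alone:
             * value -> ((value & ~0x01) | (0x01 & 0x01)) */
            return abx500_mask_and_set_register_interruptible(dev, BANK, REG,
                                                              0x01, 0x01);
    }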
+ */ + +#ifndef JANZ_H +#define JANZ_H + +struct janz_platform_data { + /* MODULbus Module Number */ + unsigned int modno; +}; + +/* PLX bridge chip onboard registers */ +struct janz_cmodio_onboard_regs { + u8 unused1; + + /* + * Read access: interrupt status + * Write access: interrupt disable + */ + u8 int_disable; + u8 unused2; + + /* + * Read access: MODULbus number (hex switch) + * Write access: interrupt enable + */ + u8 int_enable; + u8 unused3; + + /* write-only */ + u8 reset_assert; + u8 unused4; + + /* write-only */ + u8 reset_deassert; + u8 unused5; + + /* read-write access to serial EEPROM */ + u8 eep; + u8 unused6; + + /* write-only access to EEPROM chip select */ + u8 enid; +}; + +#endif /* JANZ_H */ diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h index 8895d9d8879c..4a894f688549 100644 --- a/include/linux/mfd/mc13783.h +++ b/include/linux/mfd/mc13783.h @@ -64,6 +64,70 @@ static inline int mc13783_ackirq(struct mc13783 *mc13783, int irq) MC13783_ADC0_TSMOD1 | \ MC13783_ADC0_TSMOD2) +struct mc13783_led_platform_data { +#define MC13783_LED_MD 0 +#define MC13783_LED_AD 1 +#define MC13783_LED_KP 2 +#define MC13783_LED_R1 3 +#define MC13783_LED_G1 4 +#define MC13783_LED_B1 5 +#define MC13783_LED_R2 6 +#define MC13783_LED_G2 7 +#define MC13783_LED_B2 8 +#define MC13783_LED_R3 9 +#define MC13783_LED_G3 10 +#define MC13783_LED_B3 11 +#define MC13783_LED_MAX MC13783_LED_B3 + int id; + const char *name; + const char *default_trigger; + +/* Three or two bits current selection depending on the led */ + char max_current; +}; + +struct mc13783_leds_platform_data { + int num_leds; + struct mc13783_led_platform_data *led; + +#define MC13783_LED_TRIODE_MD (1 << 0) +#define MC13783_LED_TRIODE_AD (1 << 1) +#define MC13783_LED_TRIODE_KP (1 << 2) +#define MC13783_LED_BOOST_EN (1 << 3) +#define MC13783_LED_TC1HALF (1 << 4) +#define MC13783_LED_SLEWLIMTC (1 << 5) +#define MC13783_LED_SLEWLIMBL (1 << 6) +#define MC13783_LED_TRIODE_TC1 (1 << 7) +#define MC13783_LED_TRIODE_TC2 (1 << 8) +#define MC13783_LED_TRIODE_TC3 (1 << 9) + int flags; + +#define MC13783_LED_AB_DISABLED 0 +#define MC13783_LED_AB_MD1 1 +#define MC13783_LED_AB_MD12 2 +#define MC13783_LED_AB_MD123 3 +#define MC13783_LED_AB_MD1234 4 +#define MC13783_LED_AB_MD1234_AD1 5 +#define MC13783_LED_AB_MD1234_AD12 6 +#define MC13783_LED_AB_MD1_AD 7 + char abmode; + +#define MC13783_LED_ABREF_200MV 0 +#define MC13783_LED_ABREF_400MV 1 +#define MC13783_LED_ABREF_600MV 2 +#define MC13783_LED_ABREF_800MV 3 + char abref; + +#define MC13783_LED_PERIOD_10MS 0 +#define MC13783_LED_PERIOD_100MS 1 +#define MC13783_LED_PERIOD_500MS 2 +#define MC13783_LED_PERIOD_2S 3 + char bl_period; + char tc1_period; + char tc2_period; + char tc3_period; +}; + /* to be cleaned up */ struct regulator_init_data; @@ -80,12 +144,14 @@ struct mc13783_regulator_platform_data { struct mc13783_platform_data { int num_regulators; struct mc13783_regulator_init_data *regulators; + struct mc13783_leds_platform_data *leds; #define MC13783_USE_TOUCHSCREEN (1 << 0) #define MC13783_USE_CODEC (1 << 1) #define MC13783_USE_ADC (1 << 2) #define MC13783_USE_RTC (1 << 3) #define MC13783_USE_REGULATOR (1 << 4) +#define MC13783_USE_LED (1 << 5) unsigned int flags; }; diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h new file mode 100644 index 000000000000..83747e217b27 --- /dev/null +++ b/include/linux/mfd/pcf50633/backlight.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen + * PCF50633 backlight device 
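The mc13783.h additions further up describe each LED with an id, name, default trigger and current limit, and collect them in mc13783_leds_platform_data next to chip-wide flags. A sketch of board data using those definitions; the names and the current/period choices are illustrative:

    static struct mc13783_led_platform_data board_leds[] = {
            {
                    .id              = MC13783_LED_R1,
                    .name            = "board:red:status",
                    .default_trigger = "heartbeat",
                    .max_current     = 2,
            },
    };

    static struct mc13783_leds_platform_data board_leds_pdata = {
            .num_leds  = ARRAY_SIZE(board_leds),
            .led       = board_leds,
            .flags     = MC13783_LED_TRIODE_TC1,
            .abmode    = MC13783_LED_AB_DISABLED,
            .bl_period = MC13783_LED_PERIOD_10MS,
    };

The pointer is then hung off mc13783_platform_data.leds with MC13783_USE_LED set in its flags.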
driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#ifndef __LINUX_MFD_PCF50633_BACKLIGHT +#define __LINUX_MFD_PCF50633_BACKLIGHT + +/* +* @default_brightness: Backlight brightness is initialized to this value +* +* Brightness to be used after the driver has been probed. +* Valid range 0-63. +* +* @default_brightness_limit: The actual brightness is limited by this value +* +* Brightness limit to be used after the driver has been probed. This is useful +* when it is not known how much power is available for the backlight during +* probe. +* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit. +* +* @ramp_time: Display ramp time when changing brightness +* +* When changing the backlights brightness the change is not instant, instead +* it fades smooth from one state to another. This value specifies how long +* the fade should take. The lower the value the higher the fade time. +* Valid range 0-255 +*/ +struct pcf50633_bl_platform_data { + unsigned int default_brightness; + unsigned int default_brightness_limit; + uint8_t ramp_time; +}; + + +struct pcf50633; + +int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit); + +#endif + diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 3398bd9aab11..ad411a78870c 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -18,6 +18,7 @@ #include #include #include +#include struct pcf50633; @@ -43,6 +44,8 @@ struct pcf50633_platform_data { void (*force_shutdown)(struct pcf50633 *); u8 resumers[5]; + + struct pcf50633_bl_platform_data *backlight_data; }; struct pcf50633_irq { @@ -152,6 +155,7 @@ struct pcf50633 { struct platform_device *mbc_pdev; struct platform_device *adc_pdev; struct platform_device *input_pdev; + struct platform_device *bl_pdev; struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS]; }; diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h new file mode 100644 index 000000000000..4bdf19c8eedf --- /dev/null +++ b/include/linux/mfd/rdc321x.h @@ -0,0 +1,26 @@ +#ifndef __RDC321X_MFD_H +#define __RDC321X_MFD_H + +#include +#include + +/* Offsets to be accessed in the southbridge PCI + * device configuration register */ +#define RDC321X_WDT_CTRL 0x44 +#define RDC321X_GPIO_CTRL_REG1 0x48 +#define RDC321X_GPIO_DATA_REG1 0x4c +#define RDC321X_GPIO_CTRL_REG2 0x84 +#define RDC321X_GPIO_DATA_REG2 0x88 + +#define RDC321X_MAX_GPIO 58 + +struct rdc321x_gpio_pdata { + struct pci_dev *sb_pdev; + unsigned max_gpios; +}; + +struct rdc321x_wdt_pdata { + struct pci_dev *sb_pdev; +}; + +#endif /* __RDC321X_MFD_H */ diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h new file mode 100644 index 000000000000..e47f770d3068 --- /dev/null +++ b/include/linux/mfd/tc35892.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License, version 2 + */ + +#ifndef __LINUX_MFD_TC35892_H +#define __LINUX_MFD_TC35892_H + +#include + +#define TC35892_RSTCTRL_IRQRST (1 << 4) +#define TC35892_RSTCTRL_TIMRST (1 << 3) +#define 
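The pcf50633 backlight support added above consists of pcf50633_bl_platform_data plus the backlight_data pointer added to pcf50633_platform_data in the core.h hunk. A sketch of board data; the brightness and ramp values are only examples within the documented 0-63 / 0-255 ranges:

    static struct pcf50633_bl_platform_data board_backlight = {
            .default_brightness       = 0x3f,
            .default_brightness_limit = 0x20,
            .ramp_time                = 5,
    };

    static struct pcf50633_platform_data board_pcf50633 = {
            /* regulator, charger and resume setup elided */
            .backlight_data = &board_backlight,
    };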
TC35892_RSTCTRL_ROTRST (1 << 2) +#define TC35892_RSTCTRL_KBDRST (1 << 1) +#define TC35892_RSTCTRL_GPIRST (1 << 0) + +#define TC35892_IRQST 0x91 + +#define TC35892_MANFCODE_MAGIC 0x03 +#define TC35892_MANFCODE 0x80 +#define TC35892_VERSION 0x81 +#define TC35892_IOCFG 0xA7 + +#define TC35892_CLKMODE 0x88 +#define TC35892_CLKCFG 0x89 +#define TC35892_CLKEN 0x8A + +#define TC35892_RSTCTRL 0x82 +#define TC35892_EXTRSTN 0x83 +#define TC35892_RSTINTCLR 0x84 + +#define TC35892_GPIOIS0 0xC9 +#define TC35892_GPIOIS1 0xCA +#define TC35892_GPIOIS2 0xCB +#define TC35892_GPIOIBE0 0xCC +#define TC35892_GPIOIBE1 0xCD +#define TC35892_GPIOIBE2 0xCE +#define TC35892_GPIOIEV0 0xCF +#define TC35892_GPIOIEV1 0xD0 +#define TC35892_GPIOIEV2 0xD1 +#define TC35892_GPIOIE0 0xD2 +#define TC35892_GPIOIE1 0xD3 +#define TC35892_GPIOIE2 0xD4 +#define TC35892_GPIORIS0 0xD6 +#define TC35892_GPIORIS1 0xD7 +#define TC35892_GPIORIS2 0xD8 +#define TC35892_GPIOMIS0 0xD9 +#define TC35892_GPIOMIS1 0xDA +#define TC35892_GPIOMIS2 0xDB +#define TC35892_GPIOIC0 0xDC +#define TC35892_GPIOIC1 0xDD +#define TC35892_GPIOIC2 0xDE + +#define TC35892_GPIODATA0 0xC0 +#define TC35892_GPIOMASK0 0xc1 +#define TC35892_GPIODATA1 0xC2 +#define TC35892_GPIOMASK1 0xc3 +#define TC35892_GPIODATA2 0xC4 +#define TC35892_GPIOMASK2 0xC5 + +#define TC35892_GPIODIR0 0xC6 +#define TC35892_GPIODIR1 0xC7 +#define TC35892_GPIODIR2 0xC8 + +#define TC35892_GPIOSYNC0 0xE6 +#define TC35892_GPIOSYNC1 0xE7 +#define TC35892_GPIOSYNC2 0xE8 + +#define TC35892_GPIOWAKE0 0xE9 +#define TC35892_GPIOWAKE1 0xEA +#define TC35892_GPIOWAKE2 0xEB + +#define TC35892_GPIOODM0 0xE0 +#define TC35892_GPIOODE0 0xE1 +#define TC35892_GPIOODM1 0xE2 +#define TC35892_GPIOODE1 0xE3 +#define TC35892_GPIOODM2 0xE4 +#define TC35892_GPIOODE2 0xE5 + +#define TC35892_INT_GPIIRQ 0 +#define TC35892_INT_TI0IRQ 1 +#define TC35892_INT_TI1IRQ 2 +#define TC35892_INT_TI2IRQ 3 +#define TC35892_INT_ROTIRQ 5 +#define TC35892_INT_KBDIRQ 6 +#define TC35892_INT_PORIRQ 7 + +#define TC35892_NR_INTERNAL_IRQS 8 +#define TC35892_INT_GPIO(x) (TC35892_NR_INTERNAL_IRQS + (x)) + +struct tc35892 { + struct mutex lock; + struct device *dev; + struct i2c_client *i2c; + + int irq_base; + int num_gpio; + struct tc35892_platform_data *pdata; +}; + +extern int tc35892_reg_write(struct tc35892 *tc35892, u8 reg, u8 data); +extern int tc35892_reg_read(struct tc35892 *tc35892, u8 reg); +extern int tc35892_block_read(struct tc35892 *tc35892, u8 reg, u8 length, + u8 *values); +extern int tc35892_block_write(struct tc35892 *tc35892, u8 reg, u8 length, + const u8 *values); +extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val); + +/** + * struct tc35892_gpio_platform_data - TC35892 GPIO platform data + * @gpio_base: first gpio number assigned to TC35892. A maximum of + * %TC35892_NR_GPIOS GPIOs will be allocated. + */ +struct tc35892_gpio_platform_data { + int gpio_base; +}; + +/** + * struct tc35892_platform_data - TC35892 platform data + * @irq_base: base IRQ number. %TC35892_NR_IRQS irqs will be used. 
+ * @gpio: GPIO-specific platform data + */ +struct tc35892_platform_data { + int irq_base; + struct tc35892_gpio_platform_data *gpio; +}; + +#define TC35892_NR_GPIOS 24 +#define TC35892_NR_IRQS TC35892_INT_GPIO(TC35892_NR_GPIOS) + +#endif diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h new file mode 100644 index 000000000000..c923e4864f55 --- /dev/null +++ b/include/linux/mfd/tps6507x.h @@ -0,0 +1,169 @@ +/* linux/mfd/tps6507x.h + * + * Functions to access TPS65070 power management chip. + * + * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com) + * + * + * For licencing details see kernel-base/COPYING + */ + +#ifndef __LINUX_MFD_TPS6507X_H +#define __LINUX_MFD_TPS6507X_H + +/* + * ---------------------------------------------------------------------------- + * Registers, all 8 bits + * ---------------------------------------------------------------------------- + */ + + +/* Register definitions */ +#define TPS6507X_REG_PPATH1 0X01 +#define TPS6507X_CHG_USB BIT(7) +#define TPS6507X_CHG_AC BIT(6) +#define TPS6507X_CHG_USB_PW_ENABLE BIT(5) +#define TPS6507X_CHG_AC_PW_ENABLE BIT(4) +#define TPS6507X_CHG_AC_CURRENT BIT(2) +#define TPS6507X_CHG_USB_CURRENT BIT(0) + +#define TPS6507X_REG_INT 0X02 +#define TPS6507X_REG_MASK_AC_USB BIT(7) +#define TPS6507X_REG_MASK_TSC BIT(6) +#define TPS6507X_REG_MASK_PB_IN BIT(5) +#define TPS6507X_REG_TSC_INT BIT(3) +#define TPS6507X_REG_PB_IN_INT BIT(2) +#define TPS6507X_REG_AC_USB_APPLIED BIT(1) +#define TPS6507X_REG_AC_USB_REMOVED BIT(0) + +#define TPS6507X_REG_CHGCONFIG0 0X03 + +#define TPS6507X_REG_CHGCONFIG1 0X04 +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CHGCONFIG2 0X05 + +#define TPS6507X_REG_CHGCONFIG3 0X06 + +#define TPS6507X_REG_ADCONFIG 0X07 +#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7) +#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6) +#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5) +#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4) +#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0 +#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1 +#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2 +#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3 +#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4 +#define TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5 +#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6 +#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7 +#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8 +#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9 +#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10 +#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11 +#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12 +#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14 +#define TPS6507X_ADCONFIG_INPUT_TSC 15 + +#define TPS6507X_REG_TSCMODE 0X08 +#define TPS6507X_TSCMODE_X_POSITION 0 +#define TPS6507X_TSCMODE_Y_POSITION 1 +#define TPS6507X_TSCMODE_PRESSURE 2 +#define TPS6507X_TSCMODE_X_PLATE 3 +#define TPS6507X_TSCMODE_Y_PLATE 4 +#define TPS6507X_TSCMODE_STANDBY 5 +#define TPS6507X_TSCMODE_ADC_INPUT 6 +#define TPS6507X_TSCMODE_DISABLE 7 + +#define TPS6507X_REG_ADRESULT_1 0X09 + +#define TPS6507X_REG_ADRESULT_2 0X0A +#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0)) + +#define TPS6507X_REG_PGOOD 0X0B + +#define TPS6507X_REG_PGOODMASK 0X0C + +#define TPS6507X_REG_CON_CTRL1 0X0D +#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4) +#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3) +#define 
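The tc35892.h header just above exposes the expander through tc35892_reg_read()/tc35892_reg_write()/tc35892_set_bits() plus block accessors, and takes its board wiring from two platform-data structs. A sketch of that board data; the IRQ base name is a placeholder, and a gpio_base of -1 asks gpiolib to pick a base dynamically:

    static struct tc35892_gpio_platform_data board_tc35892_gpio = {
            .gpio_base = -1,
    };

    static struct tc35892_platform_data board_tc35892 = {
            .irq_base = IRQ_BOARD_TC35892_BASE, /* TC35892_NR_IRQS IRQs used */
            .gpio     = &board_tc35892_gpio,
    };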
TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2) +#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1) +#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0) + +#define TPS6507X_REG_CON_CTRL2 0X0E + +#define TPS6507X_REG_CON_CTRL3 0X0F + +#define TPS6507X_REG_DEFDCDC1 0X10 +#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7) +#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_LOW 0X11 +#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC2_HIGH 0X12 +#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC3_LOW 0X13 +#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFDCDC3_HIGH 0X14 +#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F + +#define TPS6507X_REG_DEFSLEW 0X15 + +#define TPS6507X_REG_LDO_CTRL1 0X16 +#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F + +#define TPS6507X_REG_DEFLDO2 0X17 +#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F + +#define TPS6507X_REG_WLED_CTRL1 0X18 + +#define TPS6507X_REG_WLED_CTRL2 0X19 + +/* VDCDC MASK */ +#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F + +#define TPS6507X_MAX_REGISTER 0X19 + +/** + * struct tps6507x_board - packages regulator and touchscreen init data + * @tps6507x_regulator_data: regulator initialization values + * + * Board data may be used to initialize regulator and touchscreen. + */ + +struct tps6507x_board { + struct regulator_init_data *tps6507x_pmic_init_data; + struct touchscreen_init_data *tps6507x_ts_init_data; +}; + +/** + * struct tps6507x_dev - tps6507x sub-driver chip access routines + * @read_dev() - I2C register read function + * @write_dev() - I2C register write function + * + * Device data may be used to access the TPS6507x chip + */ + +struct tps6507x_dev { + struct device *dev; + struct i2c_client *i2c_client; + int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *dest); + int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size, + void *src); + + /* Client devices */ + struct tps6507x_pmic *pmic; + struct tps6507x_ts *ts; +}; + +#endif /* __LINUX_MFD_TPS6507X_H */ diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index 5915f6e3d9ab..eb5bd4e0e03c 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h @@ -256,8 +256,9 @@ struct wm831x { int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ /* Chip revision based flags */ - unsigned has_gpio_ena:1; /* Has GPIO enable bit */ - unsigned has_cs_sts:1; /* Has current sink status bit */ + unsigned has_gpio_ena:1; /* Has GPIO enable bit */ + unsigned has_cs_sts:1; /* Has current sink status bit */ + unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? 
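struct tps6507x_board above ties the TPS6507x regulator and touchscreen cells together; the touchscreen half uses the touchscreen_init_data layout from the tps6507x-ts.h header earlier in this patch. A sketch of board data; the numbers and board_regulator_data are illustrative:

    static struct touchscreen_init_data board_tps6507x_ts = {
            .poll_period  = 30,     /* ms */
            .min_pressure = 0x30,
            .vref         = 0,
    };

    static struct tps6507x_board board_tps6507x = {
            .tps6507x_pmic_init_data = &board_regulator_data,
            .tps6507x_ts_init_data   = &board_tps6507x_ts,
    };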
*/ int num_gpio; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 3196c84cc630..f65913c9f5a4 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -230,7 +230,7 @@ static inline void *mmc_priv(struct mmc_host *host) #define mmc_classdev(x) (&(x)->class_dev) #define mmc_hostname(x) (dev_name(&(x)->class_dev)) -extern int mmc_suspend_host(struct mmc_host *, pm_message_t); +extern int mmc_suspend_host(struct mmc_host *); extern int mmc_resume_host(struct mmc_host *); extern void mmc_power_save_host(struct mmc_host *host); diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h new file mode 100644 index 000000000000..9188c973f3e1 --- /dev/null +++ b/include/linux/mmc/sdhci-spear.h @@ -0,0 +1,42 @@ +/* + * include/linux/mmc/sdhci-spear.h + * + * SDHCI declarations specific to ST SPEAr platform + * + * Copyright (C) 2010 ST Microelectronics + * Viresh Kumar + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef MMC_SDHCI_SPEAR_H +#define MMC_SDHCI_SPEAR_H + +#include +/* + * struct sdhci_plat_data: spear sdhci platform data structure + * + * @card_power_gpio: gpio pin for enabling/disabling power to sdhci socket + * @power_active_high: if set, enable power to sdhci socket by setting + * card_power_gpio + * @power_always_enb: If set, then enable power on probe, otherwise enable only + * on card insertion and disable on card removal. + * card_int_gpio: gpio pin used for card detection + */ +struct sdhci_plat_data { + int card_power_gpio; + int power_active_high; + int power_always_enb; + int card_int_gpio; +}; + +/* This function is used to set platform_data field of pdev->dev */ +static inline void +sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data) +{ + pdev->dev.platform_data = data; +} + +#endif /* MMC_SDHCI_SPEAR_H */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index c6c0cceba5fe..31baaf82f458 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -145,6 +145,9 @@ extern void sdio_writew(struct sdio_func *func, u16 b, extern void sdio_writel(struct sdio_func *func, u32 b, unsigned int addr, int *err_ret); +extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte, + unsigned int addr, int *err_ret); + extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr, void *src, int count); extern int sdio_writesb(struct sdio_func *func, unsigned int addr, diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h new file mode 100644 index 000000000000..aafe832f18aa --- /dev/null +++ b/include/linux/mmc/sh_mmcif.h @@ -0,0 +1,39 @@ +/* + * include/linux/mmc/sh_mmcif.h + * + * platform data for eMMC driver + * + * Copyright (C) 2010 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
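sdhci-spear.h above lets SPEAr board code describe the socket's GPIO wiring and hand it to the driver through sdhci_set_plat_data(). A sketch assuming a board-defined spear_sdhci_device platform device and board GPIO numbers:

    static struct sdhci_plat_data board_sdhci_data = {
            .card_power_gpio   = BOARD_SD_POWER_GPIO,
            .power_active_high = 1,
            .power_always_enb  = 0,
            .card_int_gpio     = BOARD_SD_CD_GPIO,
    };

    static void __init board_sdhci_init(void)
    {
            sdhci_set_plat_data(&spear_sdhci_device, &board_sdhci_data);
            platform_device_register(&spear_sdhci_device);
    }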
+ * + */ + +#ifndef __SH_MMCIF_H__ +#define __SH_MMCIF_H__ + +/* + * MMCIF : CE_CLK_CTRL [19:16] + * 1000 : Peripheral clock / 512 + * 0111 : Peripheral clock / 256 + * 0110 : Peripheral clock / 128 + * 0101 : Peripheral clock / 64 + * 0100 : Peripheral clock / 32 + * 0011 : Peripheral clock / 16 + * 0010 : Peripheral clock / 8 + * 0001 : Peripheral clock / 4 + * 0000 : Peripheral clock / 2 + * 1111 : Peripheral clock (sup_pclk set '1') + */ + +struct sh_mmcif_plat_data { + void (*set_pwr)(struct platform_device *pdev, int state); + void (*down_pwr)(struct platform_device *pdev); + u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */ + unsigned long caps; + u32 ocr; +}; + +#endif /* __SH_MMCIF_H__ */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 0fa491326c4a..b4d109e389b8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -671,6 +671,12 @@ void memory_present(int nid, unsigned long start, unsigned long end); static inline void memory_present(int nid, unsigned long start, unsigned long end) {} #endif +#ifdef CONFIG_HAVE_MEMORYLESS_NODES +int local_memory_node(int node_id); +#else +static inline int local_memory_node(int node_id) { return node_id; }; +#endif + #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #endif diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index c00cc0c4d0b7..24e5d01d27d0 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -397,7 +397,7 @@ struct xt_table_info { * @stacksize jumps (number of user chains) can possibly be made. */ unsigned int stacksize; - unsigned int *stackptr; + unsigned int __percpu *stackptr; void ***jumpstack; /* ipt_entry tables: one per CPU */ /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 7c3609622334..540703b555cb 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -164,7 +164,10 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, /* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */ static inline int notifier_from_errno(int err) { - return NOTIFY_STOP_MASK | (NOTIFY_OK - err); + if (err) + return NOTIFY_STOP_MASK | (NOTIFY_OK - err); + + return NOTIFY_OK; } /* Restore (negative) errno value from notify return value. */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index aef22ae2af47..5bb13b3db84d 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -40,6 +40,7 @@ enum { PCG_USED, /* this object is in use. 
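The notifier.h fix above makes notifier_from_errno(0) return NOTIFY_OK instead of a stop-encoded value, so callbacks can funnel an errno through it unconditionally. Typical use in a notifier callback; the CPU-hotplug-style skeleton and prepare_resources() are illustrative:

    static int board_cpu_callback(struct notifier_block *nb,
                                  unsigned long action, void *hcpu)
    {
            int err = 0;

            switch (action) {
            case CPU_UP_PREPARE:
                    err = prepare_resources();   /* may fail with -ENOMEM */
                    break;
            }

            /* 0 -> NOTIFY_OK, -E... -> NOTIFY_STOP_MASK | (NOTIFY_OK - err) */
            return notifier_from_errno(err);
    }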
*/ PCG_ACCT_LRU, /* page has been accounted for */ PCG_FILE_MAPPED, /* page is accounted as "mapped" */ + PCG_MIGRATION, /* under page migration */ }; #define TESTPCGFLAG(uname, lname) \ @@ -79,6 +80,10 @@ SETPCGFLAG(FileMapped, FILE_MAPPED) CLEARPCGFLAG(FileMapped, FILE_MAPPED) TESTPCGFLAG(FileMapped, FILE_MAPPED) +SETPCGFLAG(Migration, MIGRATION) +CLEARPCGFLAG(Migration, MIGRATION) +TESTPCGFLAG(Migration, MIGRATION) + static inline int page_cgroup_nid(struct page_cgroup *pc) { return page_to_nid(pc->page); diff --git a/include/linux/pci.h b/include/linux/pci.h index a327322a33ab..6a471aba3b07 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -311,7 +311,8 @@ struct pci_dev { unsigned int is_virtfn:1; unsigned int reset_fn:1; unsigned int is_hotplug_bridge:1; - unsigned int aer_firmware_first:1; + unsigned int __aer_firmware_first_valid:1; + unsigned int __aer_firmware_first:1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 68567c0b3a5d..ce2dc655cd1d 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -131,11 +131,11 @@ * Declaration/definition used for per-CPU variables that must be page aligned. */ #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ - DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \ + DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ __aligned(PAGE_SIZE) #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ - DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \ + DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ __aligned(PAGE_SIZE) /* diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3fd5c82e0e18..fb6c91eac7e3 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -485,6 +485,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #define PERF_MAX_STACK_DEPTH 255 @@ -587,21 +588,19 @@ struct perf_mmap_data { struct rcu_head rcu_head; #ifdef CONFIG_PERF_USE_VMALLOC struct work_struct work; + int page_order; /* allocation order */ #endif - int data_order; int nr_pages; /* nr of data pages */ int writable; /* are we writable */ int nr_locked; /* nr pages mlocked */ atomic_t poll; /* POLL_ for wakeups */ - atomic_t events; /* event_id limit */ - atomic_long_t head; /* write position */ - atomic_long_t done_head; /* completed head */ - - atomic_t lock; /* concurrent writes */ - atomic_t wakeup; /* needs a wakeup */ - atomic_t lost; /* nr records lost */ + local_t head; /* write position */ + local_t nest; /* nested writers */ + local_t events; /* event limit */ + local_t wakeup; /* wakeup stamp */ + local_t lost; /* nr records lost */ long watermark; /* wakeup watermark */ @@ -728,6 +727,7 @@ struct perf_event { perf_overflow_handler_t overflow_handler; #ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *tp_event; struct event_filter *filter; #endif @@ -803,11 +803,12 @@ struct perf_cpu_context { struct perf_output_handle { struct perf_event *event; struct perf_mmap_data *data; - unsigned long head; - unsigned long offset; + unsigned long wakeup; + unsigned long size; + void *addr; + int page; int nmi; int sample; - int locked; }; #ifdef CONFIG_PERF_EVENTS @@ -993,8 +994,9 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size, struct pt_regs *regs); +extern void perf_tp_event(u64 addr, u64 count, void *record, + int 
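The percpu-defs.h hunk above (like the earlier linkage.h one) only renames the backing sections to the new ..page_aligned scheme; the macros are used exactly as before. For reference, a minimal declaration, with struct my_percpu_buf standing in for a real per-CPU type:

    /* one page-aligned instance per possible CPU */
    static DEFINE_PER_CPU_PAGE_ALIGNED(struct my_percpu_buf, my_buf);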
entry_size, struct pt_regs *regs, + struct hlist_head *head); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/include/linux/quota.h b/include/linux/quota.h index 7126a15467f1..94c1f03b50eb 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -174,8 +174,7 @@ enum { #include #include #include -#include -#include +#include #include #include @@ -254,6 +253,7 @@ enum { struct dqstats { int stat[_DQST_DQSTAT_LAST]; + struct percpu_counter counter[_DQST_DQSTAT_LAST]; }; extern struct dqstats *dqstats_pcpu; @@ -261,20 +261,12 @@ extern struct dqstats dqstats; static inline void dqstats_inc(unsigned int type) { -#ifdef CONFIG_SMP - per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++; -#else - dqstats.stat[type]++; -#endif + percpu_counter_inc(&dqstats.counter[type]); } static inline void dqstats_dec(unsigned int type) { -#ifdef CONFIG_SMP - per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--; -#else - dqstats.stat[type]--; -#endif + percpu_counter_dec(&dqstats.counter[type]); } #define DQ_MOD_B 0 /* dquot modified since read */ @@ -332,8 +324,8 @@ struct dquot_operations { /* Operations handling requests from userspace */ struct quotactl_ops { - int (*quota_on)(struct super_block *, int, int, char *, int); - int (*quota_off)(struct super_block *, int, int); + int (*quota_on)(struct super_block *, int, int, char *); + int (*quota_off)(struct super_block *, int); int (*quota_sync)(struct super_block *, int, int); int (*get_info)(struct super_block *, int, struct if_dqinfo *); int (*set_info)(struct super_block *, int, struct if_dqinfo *); diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 370abb1e99cb..aa36793b48bd 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -9,6 +9,10 @@ #include +#define DQUOT_SPACE_WARN 0x1 +#define DQUOT_SPACE_RESERVE 0x2 +#define DQUOT_SPACE_NOFAIL 0x4 + static inline struct quota_info *sb_dqopt(struct super_block *sb) { return &sb->s_dquot; @@ -41,15 +45,22 @@ int dquot_scan_active(struct super_block *sb, struct dquot *dquot_alloc(struct super_block *sb, int type); void dquot_destroy(struct dquot *dquot); -int __dquot_alloc_space(struct inode *inode, qsize_t number, - int warn, int reserve); -void __dquot_free_space(struct inode *inode, qsize_t number, int reserve); +int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); +void __dquot_free_space(struct inode *inode, qsize_t number, int flags); int dquot_alloc_inode(const struct inode *inode); int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); void dquot_free_inode(const struct inode *inode); +int dquot_disable(struct super_block *sb, int type, unsigned int flags); +/* Suspend quotas on remount RO */ +static inline int dquot_suspend(struct super_block *sb, int type) +{ + return dquot_disable(sb, type, DQUOT_SUSPENDED); +} +int dquot_resume(struct super_block *sb, int type); + int dquot_commit(struct dquot *dquot); int dquot_acquire(struct dquot *dquot); int dquot_release(struct dquot *dquot); @@ -58,27 +69,25 @@ int dquot_mark_dquot_dirty(struct dquot *dquot); int dquot_file_open(struct inode *inode, struct file *file); -int vfs_quota_on(struct super_block *sb, int type, int format_id, - char *path, int remount); -int vfs_quota_enable(struct inode *inode, int type, int format_id, +int dquot_quota_on(struct super_block *sb, int type, int format_id, + char *path); +int dquot_enable(struct inode *inode, int type, int format_id, unsigned int flags); -int 
vfs_quota_on_path(struct super_block *sb, int type, int format_id, +int dquot_quota_on_path(struct super_block *sb, int type, int format_id, struct path *path); -int vfs_quota_on_mount(struct super_block *sb, char *qf_name, +int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type); -int vfs_quota_off(struct super_block *sb, int type, int remount); -int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags); -int vfs_quota_sync(struct super_block *sb, int type, int wait); -int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); -int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, +int dquot_quota_off(struct super_block *sb, int type); +int dquot_quota_sync(struct super_block *sb, int type, int wait); +int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii); +int dquot_get_dqblk(struct super_block *sb, int type, qid_t id, struct fs_disk_quota *di); -int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, +int dquot_set_dqblk(struct super_block *sb, int type, qid_t id, struct fs_disk_quota *di); int __dquot_transfer(struct inode *inode, struct dquot **transfer_to); int dquot_transfer(struct inode *inode, struct iattr *iattr); -int vfs_dq_quota_on_remount(struct super_block *sb); static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type) { @@ -145,20 +154,7 @@ static inline unsigned sb_any_quota_active(struct super_block *sb) * Operations supported for diskquotas. */ extern const struct dquot_operations dquot_operations; -extern const struct quotactl_ops vfs_quotactl_ops; - -#define sb_dquot_ops (&dquot_operations) -#define sb_quotactl_ops (&vfs_quotactl_ops) - -/* Cannot be called inside a transaction */ -static inline int vfs_dq_off(struct super_block *sb, int remount) -{ - int ret = -ENOSYS; - - if (sb->s_qcop && sb->s_qcop->quota_off) - ret = sb->s_qcop->quota_off(sb, -1, remount); - return ret; -} +extern const struct quotactl_ops dquot_quotactl_ops; #else @@ -203,12 +199,6 @@ static inline int sb_any_quota_active(struct super_block *sb) return 0; } -/* - * NO-OP when quota not configured. 
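The quotaops.h rework in the hunks above replaces vfs_dq_off()/vfs_dq_quota_on_remount() with dquot_disable()/dquot_suspend()/dquot_resume() and renames the vfs_quota_* operations to dquot_quota_*. The remount pattern a filesystem follows with the new names (foofs is a placeholder, error handling trimmed; type -1 covers all quota types):

    static int foofs_remount(struct super_block *sb, int *flags, char *data)
    {
            if (*flags & MS_RDONLY)
                    dquot_suspend(sb, -1);  /* suspend all enabled quotas */
            else
                    dquot_resume(sb, -1);   /* re-enable what was suspended */
            return 0;
    }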
- */ -#define sb_dquot_ops (NULL) -#define sb_quotactl_ops (NULL) - static inline void dquot_initialize(struct inode *inode) { } @@ -226,39 +216,45 @@ static inline void dquot_free_inode(const struct inode *inode) { } -static inline int vfs_dq_off(struct super_block *sb, int remount) +static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) { return 0; } -static inline int vfs_dq_quota_on_remount(struct super_block *sb) +static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, + int flags) { + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_add_bytes(inode, number); return 0; } -static inline int dquot_transfer(struct inode *inode, struct iattr *iattr) +static inline void __dquot_free_space(struct inode *inode, qsize_t number, + int flags) +{ + if (!(flags & DQUOT_SPACE_RESERVE)) + inode_sub_bytes(inode, number); +} + +static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { + inode_add_bytes(inode, number); return 0; } -static inline int __dquot_alloc_space(struct inode *inode, qsize_t number, - int warn, int reserve) +static inline int dquot_disable(struct super_block *sb, int type, + unsigned int flags) { - if (!reserve) - inode_add_bytes(inode, number); return 0; } -static inline void __dquot_free_space(struct inode *inode, qsize_t number, - int reserve) +static inline int dquot_suspend(struct super_block *sb, int type) { - if (!reserve) - inode_sub_bytes(inode, number); + return 0; } -static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) +static inline int dquot_resume(struct super_block *sb, int type) { - inode_add_bytes(inode, number); return 0; } @@ -268,7 +264,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) { - return __dquot_alloc_space(inode, nr, 1, 0); + return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN); +} + +static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) +{ + __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); + mark_inode_dirty(inode); } static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) @@ -286,6 +288,11 @@ static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr) return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits); } +static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr) +{ + dquot_alloc_space_nofail(inode, nr << inode->i_blkbits); +} + static inline int dquot_alloc_block(struct inode *inode, qsize_t nr) { return dquot_alloc_space(inode, nr << inode->i_blkbits); @@ -293,7 +300,7 @@ static inline int dquot_alloc_block(struct inode *inode, qsize_t nr) static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr) { - return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0); + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0); } static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) @@ -308,7 +315,8 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) static inline int dquot_reserve_block(struct inode *inode, qsize_t nr) { - return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1); + return __dquot_alloc_space(inode, nr << inode->i_blkbits, + DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE); } static inline int dquot_claim_block(struct inode *inode, qsize_t nr) @@ -345,7 +353,7 @@ static inline void dquot_free_block(struct inode *inode, qsize_t nr) static inline void 
dquot_release_reservation_block(struct inode *inode, qsize_t nr) { - __dquot_free_space(inode, nr << inode->i_blkbits, 1); + __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE); } #endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/random.h b/include/linux/random.h index 25d02fe5c9b5..fb7ab9de5f36 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -40,6 +40,10 @@ struct rand_pool_info { __u32 buf[0]; }; +struct rnd_state { + __u32 s1, s2, s3; +}; + /* Exported functions */ #ifdef __KERNEL__ @@ -74,6 +78,30 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l u32 random32(void); void srandom32(u32 seed); +u32 prandom32(struct rnd_state *); + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom32_seed - set seed for prandom32(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom32_seed(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 1); + state->s2 = __seed(i, 7); + state->s3 = __seed(i, 15); +} + #endif /* __KERNEL___ */ #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/rio.h b/include/linux/rio.h index dc0c75556c63..bd6eb0ed34a7 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -64,10 +64,13 @@ #define RIO_INB_MBOX_RESOURCE 1 #define RIO_OUTB_MBOX_RESOURCE 2 +#define RIO_PW_MSG_SIZE 64 + extern struct bus_type rio_bus_type; extern struct list_head rio_devices; /* list of all devices */ struct rio_mport; +union rio_pw_msg; /** * struct rio_dev - RIO device info @@ -85,11 +88,15 @@ struct rio_mport; * @swpinfo: Switch port info * @src_ops: Source operation capabilities * @dst_ops: Destination operation capabilities + * @comp_tag: RIO component tag + * @phys_efptr: RIO device extended features pointer + * @em_efptr: RIO Error Management features pointer * @dma_mask: Mask of bits of RIO address this device implements * @rswitch: Pointer to &struct rio_switch if valid for this device * @driver: Driver claiming this device * @dev: Device model device * @riores: RIO resources this device owns + * @pwcback: port-write callback function for this device * @destid: Network destination ID */ struct rio_dev { @@ -107,11 +114,15 @@ struct rio_dev { u32 swpinfo; /* Only used for switches */ u32 src_ops; u32 dst_ops; + u32 comp_tag; + u32 phys_efptr; + u32 em_efptr; u64 dma_mask; struct rio_switch *rswitch; /* RIO switch info */ struct rio_driver *driver; /* RIO driver claiming this device */ struct device dev; /* LDM device structure */ struct resource riores[RIO_MAX_DEV_RESOURCES]; + int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step); u16 destid; }; @@ -211,8 +222,14 @@ struct rio_net { * @hopcount: Hopcount to this switch * @destid: Associated destid in the path * @route_table: Copy of switch routing table + * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0 * @add_entry: Callback for switch-specific route add function * @get_entry: Callback for switch-specific route get function + * @clr_table: Callback for switch-specific clear route table function + * @set_domain: Callback for switch-specific domain setting function + * @get_domain: Callback for switch-specific domain get function + * @em_init: Callback for switch-specific error management initialization function + * @em_handle: Callback for switch-specific error management handler 
function */ struct rio_switch { struct list_head node; @@ -220,10 +237,19 @@ struct rio_switch { u16 hopcount; u16 destid; u8 *route_table; + u32 port_ok; int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 route_port); int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount, u16 table, u16 route_destid, u8 * route_port); + int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount, + u16 table); + int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 sw_domain); + int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount, + u8 *sw_domain); + int (*em_init) (struct rio_dev *dev); + int (*em_handle) (struct rio_dev *dev, u8 swport); }; /* Low-level architecture-dependent routines */ @@ -235,6 +261,7 @@ struct rio_switch { * @cread: Callback to perform network read of config space. * @cwrite: Callback to perform network write of config space. * @dsend: Callback to send a doorbell message. + * @pwenable: Callback to enable/disable port-write message handling. */ struct rio_ops { int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len, @@ -246,6 +273,7 @@ struct rio_ops { int (*cwrite) (struct rio_mport *mport, int index, u16 destid, u8 hopcount, u32 offset, int len, u32 data); int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data); + int (*pwenable) (struct rio_mport *mport, int enable); }; #define RIO_RESOURCE_MEM 0x00000100 @@ -302,21 +330,28 @@ struct rio_device_id { }; /** - * struct rio_route_ops - Per-switch route operations + * struct rio_switch_ops - Per-switch operations * @vid: RIO vendor ID * @did: RIO device ID - * @add_hook: Callback that adds a route entry - * @get_hook: Callback that gets a route entry + * @init_hook: Callback that performs switch device initialization * - * Defines the operations that are necessary to manipulate the route - * tables for a particular RIO switch device. + * Defines the operations that are necessary to initialize/control + * a particular RIO switch device. 
*/ -struct rio_route_ops { +struct rio_switch_ops { u16 vid, did; - int (*add_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 route_port); - int (*get_hook) (struct rio_mport * mport, u16 destid, u8 hopcount, - u16 table, u16 route_destid, u8 * route_port); + int (*init_hook) (struct rio_dev *rdev, int do_enum); +}; + +union rio_pw_msg { + struct { + u32 comptag; /* Component Tag CSR */ + u32 errdetect; /* Port N Error Detect CSR */ + u32 is_port; /* Implementation specific + PortID */ + u32 ltlerrdet; /* LTL Error Detect CSR */ + u32 padding[12]; + } em; + u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; }; /* Architecture and hardware-specific functions */ diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h index c93a58a40033..edc55da717b3 100644 --- a/include/linux/rio_drv.h +++ b/include/linux/rio_drv.h @@ -413,6 +413,12 @@ void rio_release_regions(struct rio_dev *); int rio_request_region(struct rio_dev *, int, char *); void rio_release_region(struct rio_dev *, int); +/* Port-Write management */ +extern int rio_request_inb_pwrite(struct rio_dev *, + int (*)(struct rio_dev *, union rio_pw_msg*, int)); +extern int rio_release_inb_pwrite(struct rio_dev *); +extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg); + /* LDM support */ int rio_register_driver(struct rio_driver *); void rio_unregister_driver(struct rio_driver *); diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 919d4e07d54e..db50e1c288b7 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h @@ -20,5 +20,19 @@ #define RIO_VID_TUNDRA 0x000d #define RIO_DID_TSI500 0x0500 +#define RIO_DID_TSI568 0x0568 +#define RIO_DID_TSI572 0x0572 +#define RIO_DID_TSI574 0x0574 +#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */ +#define RIO_DID_TSI577 0x0577 +#define RIO_DID_TSI578 0x0578 + +#define RIO_VID_IDT 0x0038 +#define RIO_DID_IDT70K200 0x0310 +#define RIO_DID_IDTCPS8 0x035c +#define RIO_DID_IDTCPS12 0x035d +#define RIO_DID_IDTCPS16 0x035b +#define RIO_DID_IDTCPS6Q 0x035f +#define RIO_DID_IDTCPS10Q 0x035e #endif /* LINUX_RIO_IDS_H */ diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index 326540f9b54e..aedee0489fb4 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -39,6 +39,8 @@ #define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ #define RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ #define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ +#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ +#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ #define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */ #define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */ @@ -91,7 +93,10 @@ #define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */ #define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */ - /* 0x20-0x3c *//* Reserved */ + /* 0x20-0x30 *//* Reserved */ + +#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ +#define RIO_RT_MAX_DESTID 0x0000ffff #define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ @@ -153,7 +158,11 @@ #define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */ #define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */ - /* 0x70-0xf8 *//* Reserved */ +#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70 +#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74 +#define 
RIO_STD_RTE_DEFAULT_PORT 0x78 + + /* 0x7c-0xf8 *//* Reserved */ /* 0x100-0xfff8 *//* [I] Extended Features Space */ /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */ @@ -183,9 +192,14 @@ #define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */ #define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */ #define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */ +#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */ +#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */ +#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */ #define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */ #define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */ #define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */ +#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */ +#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */ /* * Physical 8/16 LP-LVDS @@ -201,15 +215,71 @@ #define RIO_PORT_MNT_HEADER 0x0000 #define RIO_PORT_REQ_CTL_CSR 0x0020 #define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */ +#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */ +#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */ #define RIO_PORT_GEN_CTL_CSR 0x003c #define RIO_PORT_GEN_HOST 0x80000000 #define RIO_PORT_GEN_MASTER 0x40000000 #define RIO_PORT_GEN_DISCOVERED 0x20000000 #define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */ #define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */ +#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */ +#define RIO_PORT_N_MNT_RSP_ASTAT 0x000003e0 /* ackID Status */ +#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */ #define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */ -#define RIO_PORT_N_ERR_STS_CSR(x) (0x58 + x*0x20) -#define PORT_N_ERR_STS_PORT_OK 0x00000002 -#define RIO_PORT_N_CTL_CSR(x) (0x5c + x*0x20) +#define RIO_PORT_N_ACK_CLEAR 0x80000000 +#define RIO_PORT_N_ACK_INBOUND 0x1f000000 +#define RIO_PORT_N_ACK_OUTSTAND 0x00001f00 +#define RIO_PORT_N_ACK_OUTBOUND 0x0000001f +#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20) +#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */ +#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */ +#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */ +#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004 +#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002 +#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001 +#define RIO_PORT_N_ERR_STS_CLR_MASK 0x07120204 +#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20) +#define RIO_PORT_N_CTL_PWIDTH 0xc0000000 +#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000 +#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000 +#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001 +#define RIO_PORT_N_CTL_LOCKOUT 0x00000002 +#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000 +#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000 +#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000 +#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000 + +/* + * Error Management Extensions (RapidIO 1.3+, Part 8) + * + * Extended Features Block ID=0x0007 + */ + +/* General EM Registers (Common for all Ports) */ + +#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */ +#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */ +#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport 
Layer Error Enable CSR */ +#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */ +#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */ +#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */ +#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */ +#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */ +#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */ + +/* Per-Port EM Registers */ + +#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */ +#define REM_PED_IMPL_SPEC 0x80000000 +#define REM_PED_LINK_TO 0x00000001 +#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */ +#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */ +#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */ +#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */ +#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */ +#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */ +#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */ +#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */ #endif /* LINUX_RIO_REGS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c0151ffd3541..f118809c953f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -268,7 +268,6 @@ extern void init_idle(struct task_struct *idle, int cpu); extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(int cpu); -extern void task_rq_unlock_wait(struct task_struct *p); extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) @@ -527,8 +526,9 @@ struct thread_group_cputimer { * the locking of signal_struct. 
*/ struct signal_struct { - atomic_t count; + atomic_t sigcnt; atomic_t live; + int nr_threads; wait_queue_head_t wait_chldexit; /* for wait4() */ @@ -1423,6 +1423,7 @@ struct task_struct { nodemask_t mems_allowed; /* Protected by alloc_lock */ int mems_allowed_change_disable; int cpuset_mem_spread_rotor; + int cpuset_slab_spread_rotor; #endif #ifdef CONFIG_CGROUPS /* Control Group info protected by css_set_lock */ @@ -2035,7 +2036,7 @@ extern int do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); -extern void zap_other_threads(struct task_struct *p); +extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); extern void sigqueue_free(struct sigqueue *); extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); @@ -2100,7 +2101,6 @@ extern void flush_thread(void); extern void exit_thread(void); extern void exit_files(struct task_struct *); -extern void __cleanup_signal(struct signal_struct *); extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); @@ -2147,6 +2147,11 @@ extern bool current_is_single_threaded(void); #define while_each_thread(g, t) \ while ((t = next_thread(t)) != g) +static inline int get_nr_threads(struct task_struct *tsk) +{ + return tsk->signal->nr_threads; +} + /* de_thread depends on thread_group_leader not being a pid based check */ #define thread_group_leader(p) (p == p->group_leader) @@ -2393,10 +2398,6 @@ static inline void thread_group_cputime_init(struct signal_struct *sig) spin_lock_init(&sig->cputimer.lock); } -static inline void thread_group_cputime_free(struct signal_struct *sig) -{ -} - /* * Reevaluate whether the task has signals pending delivery. * Wake the task if so. diff --git a/include/linux/sdhci-pltfm.h b/include/linux/sdhci-pltfm.h new file mode 100644 index 000000000000..0239bd70241e --- /dev/null +++ b/include/linux/sdhci-pltfm.h @@ -0,0 +1,35 @@ +/* + * Platform data declarations for the sdhci-pltfm driver. + * + * Copyright (c) 2010 MontaVista Software, LLC. + * + * Author: Anton Vorontsov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#ifndef _SDHCI_PLTFM_H +#define _SDHCI_PLTFM_H + +struct sdhci_ops; +struct sdhci_host; + +/** + * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks + * @ops: optional pointer to the platform-provided SDHCI ops + * @quirks: optional SDHCI quirks + * @init: optional hook that is called during device probe, before the + * driver tries to access any SDHCI registers + * @exit: optional hook that is called during device removal + */ +struct sdhci_pltfm_data { + struct sdhci_ops *ops; + unsigned int quirks; + int (*init)(struct sdhci_host *host); + void (*exit)(struct sdhci_host *host); +}; + +#endif /* _SDHCI_PLTFM_H */ diff --git a/include/linux/sem.h b/include/linux/sem.h index 8a4adbef8a0f..f2961afa2f66 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -79,6 +79,7 @@ struct seminfo { #ifdef __KERNEL__ #include #include +#include struct task_struct; @@ -91,7 +92,8 @@ struct sem { /* One sem_array data structure for each set of semaphores in the system. 
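[Editor's sketch, not part of the patch] For the new include/linux/sdhci-pltfm.h above, a hypothetical piece of board/platform glue filling in struct sdhci_pltfm_data; the sdhci-pltfm driver and the struct fields are from this patch, the hook body and names are invented for illustration.

/* Hypothetical init hook: runs at probe time, before any SDHCI register access */
static int example_sdhci_init(struct sdhci_host *host)
{
        /* e.g. enable board clocks or adjust quirks here */
        return 0;
}

static struct sdhci_pltfm_data example_sdhci_pdata = {
        .quirks = 0,                    /* no quirks assumed */
        .init   = example_sdhci_init,
        /* .ops and .exit are optional and left unset */
};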
*/ struct sem_array { - struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ + struct kern_ipc_perm ____cacheline_aligned_in_smp + sem_perm; /* permissions .. see ipc.h */ time_t sem_otime; /* last semop time */ time_t sem_ctime; /* last change time */ struct sem *sem_base; /* ptr to first semaphore in array */ diff --git a/include/linux/sfi.h b/include/linux/sfi.h index 9a6f7607174e..0299b4ce63db 100644 --- a/include/linux/sfi.h +++ b/include/linux/sfi.h @@ -73,6 +73,8 @@ #define SFI_SIG_SPIB "SPIB" #define SFI_SIG_I2CB "I2CB" #define SFI_SIG_GPEM "GPEM" +#define SFI_SIG_DEVS "DEVS" +#define SFI_SIG_GPIO "GPIO" #define SFI_SIGNATURE_SIZE 4 #define SFI_OEM_ID_SIZE 6 @@ -145,6 +147,27 @@ struct sfi_rtc_table_entry { u32 irq; } __packed; +struct sfi_device_table_entry { + u8 type; /* bus type, I2C, SPI or ...*/ +#define SFI_DEV_TYPE_SPI 0 +#define SFI_DEV_TYPE_I2C 1 +#define SFI_DEV_TYPE_UART 2 +#define SFI_DEV_TYPE_HSI 3 +#define SFI_DEV_TYPE_IPC 4 + + u8 host_num; /* attached to host 0, 1...*/ + u16 addr; + u8 irq; + u32 max_freq; + char name[16]; +} __packed; + +struct sfi_gpio_table_entry { + char controller_name[16]; + u16 pin_no; + char pin_name[16]; +} __packed; + struct sfi_spi_table_entry { u16 host_num; /* attached to host 0, 1...*/ u16 cs; /* chip select */ @@ -166,7 +189,6 @@ struct sfi_gpe_table_entry { u16 phys_id; /* physical GPE id */ } __packed; - typedef int (*sfi_table_handler) (struct sfi_table_header *table); #ifdef CONFIG_SFI diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7cdfb4d52847..bf243fc54959 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -501,7 +501,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, 1, -1); } -extern int skb_recycle_check(struct sk_buff *skb, int skb_size); +extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); extern struct sk_buff *skb_clone(struct sk_buff *skb, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 55695c8d2f8a..4ba59cfc1f75 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -75,12 +75,6 @@ struct kmem_cache { int offset; /* Free pointer offset. */ struct kmem_cache_order_objects oo; - /* - * Avoid an extra cache line for UP, SMP and for the node local to - * struct kmem_cache. 
- */ - struct kmem_cache_node local_node; - /* Allocation and freeing of slabs */ struct kmem_cache_order_objects max; struct kmem_cache_order_objects min; @@ -102,6 +96,9 @@ struct kmem_cache { */ int remote_node_defrag_ratio; struct kmem_cache_node *node[MAX_NUMNODES]; +#else + /* Avoid an extra cache line for UP */ + struct kmem_cache_node local_node; #endif }; @@ -140,7 +137,7 @@ struct kmem_cache { #ifdef CONFIG_ZONE_DMA #define SLUB_DMA __GFP_DMA /* Reserve extra caches for potential DMA use */ -#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6) +#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT) #else /* Disable DMA functionality */ #define SLUB_DMA (__force gfp_t)0 diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 89fac6a3f78b..f8854655860e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -60,7 +60,7 @@ /* * Must define these before including other files, inline functions need them */ -#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME +#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME #define LOCK_SECTION_START(extra) \ ".subsection 1\n\t" \ diff --git a/include/linux/swap.h b/include/linux/swap.h index b6b614364dd8..ff4acea9bbdb 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -282,6 +282,11 @@ extern void kswapd_stop(int nid); extern int shmem_unuse(swp_entry_t entry, struct page *page); #endif /* CONFIG_MMU */ +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +extern void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, + struct page **pagep, swp_entry_t *ent); +#endif + extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); #ifdef CONFIG_SWAP diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index febedcf67c7e..81a4e213c6cf 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -73,16 +73,6 @@ extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, int nelems, enum dma_data_direction dir); -extern void -swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir); - -extern void -swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, - enum dma_data_direction dir); - extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 057929b0a651..a1a86a53bc73 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -103,22 +103,6 @@ struct perf_event_attr; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) -#ifdef CONFIG_PERF_EVENTS - -#define TRACE_SYS_ENTER_PERF_INIT(sname) \ - .perf_event_enable = perf_sysenter_enable, \ - .perf_event_disable = perf_sysenter_disable, - -#define TRACE_SYS_EXIT_PERF_INIT(sname) \ - .perf_event_enable = perf_sysexit_enable, \ - .perf_event_disable = perf_sysexit_disable, -#else -#define TRACE_SYS_ENTER_PERF(sname) -#define TRACE_SYS_ENTER_PERF_INIT(sname) -#define TRACE_SYS_EXIT_PERF(sname) -#define TRACE_SYS_EXIT_PERF_INIT(sname) -#endif /* CONFIG_PERF_EVENTS */ - #ifdef CONFIG_FTRACE_SYSCALLS #define __SC_STR_ADECL1(t, a) #a #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) @@ -134,54 +118,43 @@ struct perf_event_attr; #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) #define __SC_STR_TDECL6(t, a, ...) 
#t, __SC_STR_TDECL5(__VA_ARGS__) +extern struct ftrace_event_class event_class_syscall_enter; +extern struct ftrace_event_class event_class_syscall_exit; +extern struct trace_event_functions enter_syscall_print_funcs; +extern struct trace_event_functions exit_syscall_print_funcs; + #define SYSCALL_TRACE_ENTER_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_enter_##sname; \ - static struct trace_event enter_syscall_print_##sname = { \ - .trace = print_syscall_enter, \ - }; \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ event_enter_##sname = { \ .name = "sys_enter"#sname, \ - .system = "syscalls", \ - .event = &enter_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ - .define_fields = syscall_enter_define_fields, \ - .regfunc = reg_event_syscall_enter, \ - .unregfunc = unreg_event_syscall_enter, \ + .class = &event_class_syscall_enter, \ + .event.funcs = &enter_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ - static const struct syscall_metadata __syscall_meta_##sname; \ + static struct syscall_metadata __syscall_meta_##sname; \ static struct ftrace_event_call \ __attribute__((__aligned__(4))) event_exit_##sname; \ - static struct trace_event exit_syscall_print_##sname = { \ - .trace = print_syscall_exit, \ - }; \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) \ event_exit_##sname = { \ .name = "sys_exit"#sname, \ - .system = "syscalls", \ - .event = &exit_syscall_print_##sname, \ - .raw_init = init_syscall_trace, \ - .define_fields = syscall_exit_define_fields, \ - .regfunc = reg_event_syscall_exit, \ - .unregfunc = unreg_event_syscall_exit, \ + .class = &event_class_syscall_exit, \ + .event.funcs = &exit_syscall_print_funcs, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ SYSCALL_TRACE_ENTER_EVENT(sname); \ SYSCALL_TRACE_EXIT_EVENT(sname); \ - static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta_##sname = { \ @@ -191,12 +164,14 @@ struct perf_event_attr; .args = args_##sname, \ .enter_event = &event_enter_##sname, \ .exit_event = &event_exit_##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ + .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \ }; #define SYSCALL_DEFINE0(sname) \ SYSCALL_TRACE_ENTER_EVENT(_##sname); \ SYSCALL_TRACE_EXIT_EVENT(_##sname); \ - static const struct syscall_metadata __used \ + static struct syscall_metadata __used \ __attribute__((__aligned__(4))) \ __attribute__((section("__syscalls_metadata"))) \ __syscall_meta__##sname = { \ @@ -204,6 +179,8 @@ struct perf_event_attr; .nb_args = 0, \ .enter_event = &event_enter__##sname, \ .exit_event = &event_exit__##sname, \ + .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ + .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \ }; \ asmlinkage long sys_##sname(void) #else diff --git a/include/linux/threads.h b/include/linux/threads.h index 052b12bec8bd..383ab9592bec 100644 --- a/include/linux/threads.h +++ 
b/include/linux/threads.h @@ -33,4 +33,13 @@ #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) +/* + * Define a minimum number of pids per cpu. Heuristically based + * on original pid max of 32k for 32 cpus. Also, increase the + * minimum settable value for pid_max on the running system based + * on similar defaults. See kernel/pid.c:pidmap_init() for details. + */ +#define PIDS_PER_CPU_DEFAULT 1024 +#define PIDS_PER_CPU_MIN 8 + #endif diff --git a/include/linux/topology.h b/include/linux/topology.h index 5b81156780b1..c44df50a05ab 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #ifndef node_has_online_mem @@ -203,8 +204,114 @@ int arch_update_cpu_topology(void); #ifndef SD_NODE_INIT #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! #endif + #endif /* CONFIG_NUMA */ +#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID +DECLARE_PER_CPU(int, numa_node); + +#ifndef numa_node_id +/* Returns the number of the current Node. */ +static inline int numa_node_id(void) +{ + return __this_cpu_read(numa_node); +} +#endif + +#ifndef cpu_to_node +static inline int cpu_to_node(int cpu) +{ + return per_cpu(numa_node, cpu); +} +#endif + +#ifndef set_numa_node +static inline void set_numa_node(int node) +{ + percpu_write(numa_node, node); +} +#endif + +#ifndef set_cpu_numa_node +static inline void set_cpu_numa_node(int cpu, int node) +{ + per_cpu(numa_node, cpu) = node; +} +#endif + +#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */ + +/* Returns the number of the current Node. */ +#ifndef numa_node_id +static inline int numa_node_id(void) +{ + return cpu_to_node(raw_smp_processor_id()); +} +#endif + +#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */ + +#ifdef CONFIG_HAVE_MEMORYLESS_NODES + +/* + * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. + * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. + * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). + */ +DECLARE_PER_CPU(int, _numa_mem_); + +#ifndef set_numa_mem +static inline void set_numa_mem(int node) +{ + percpu_write(_numa_mem_, node); +} +#endif + +#ifndef numa_mem_id +/* Returns the number of the nearest Node with memory */ +static inline int numa_mem_id(void) +{ + return __this_cpu_read(_numa_mem_); +} +#endif + +#ifndef cpu_to_mem +static inline int cpu_to_mem(int cpu) +{ + return per_cpu(_numa_mem_, cpu); +} +#endif + +#ifndef set_cpu_numa_mem +static inline void set_cpu_numa_mem(int cpu, int node) +{ + per_cpu(_numa_mem_, cpu) = node; +} +#endif + +#else /* !CONFIG_HAVE_MEMORYLESS_NODES */ + +static inline void set_numa_mem(int node) {} + +static inline void set_cpu_numa_mem(int cpu, int node) {} + +#ifndef numa_mem_id +/* Returns the number of the nearest Node with memory */ +static inline int numa_mem_id(void) +{ + return numa_node_id(); +} +#endif + +#ifndef cpu_to_mem +static inline int cpu_to_mem(int cpu) +{ + return cpu_to_node(cpu); +} +#endif + +#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */ + #ifndef topology_physical_package_id #define topology_physical_package_id(cpu) ((void)(cpu), -1) #endif @@ -218,9 +325,4 @@ int arch_update_cpu_topology(void); #define topology_core_cpumask(cpu) cpumask_of(cpu) #endif -/* Returns the number of the current Node. 
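[Editor's sketch, not part of the patch] A usage sketch for the memoryless-node accessors added to topology.h above; the helper below is hypothetical, and kmalloc_node() (from linux/slab.h) is the ordinary node-aware allocator.

/* Allocate from the nearest node that actually has memory; with
 * CONFIG_HAVE_MEMORYLESS_NODES unset, numa_mem_id() simply falls back
 * to numa_node_id(). */
static void *example_alloc_near(size_t size, gfp_t flags)
{
        return kmalloc_node(size, flags, numa_mem_id());
}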
*/ -#ifndef numa_node_id -#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) -#endif - #endif /* _LINUX_TOPOLOGY_H */ diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 1d85f9a6a199..9a59d1f98cd4 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -20,12 +20,17 @@ struct module; struct tracepoint; +struct tracepoint_func { + void *func; + void *data; +}; + struct tracepoint { const char *name; /* Tracepoint name */ int state; /* State. */ void (*regfunc)(void); void (*unregfunc)(void); - void **funcs; + struct tracepoint_func *funcs; } __attribute__((aligned(32))); /* * Aligned on 32 bytes because it is * globally visible and gcc happily @@ -37,16 +42,19 @@ struct tracepoint { * Connect a probe to a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_register(const char *name, void *probe); +extern int tracepoint_probe_register(const char *name, void *probe, void *data); /* * Disconnect a probe from a tracepoint. * Internal API, should not be used directly. */ -extern int tracepoint_probe_unregister(const char *name, void *probe); +extern int +tracepoint_probe_unregister(const char *name, void *probe, void *data); -extern int tracepoint_probe_register_noupdate(const char *name, void *probe); -extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe); +extern int tracepoint_probe_register_noupdate(const char *name, void *probe, + void *data); +extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, + void *data); extern void tracepoint_probe_update_all(void); struct tracepoint_iter { @@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, /* * it_func[0] is never NULL because there is at least one element in the array * when the array itself is non NULL. + * + * Note, the proto and args passed in includes "__data" as the first parameter. + * The reason for this is to handle the "void" prototype. If a tracepoint + * has a "void" prototype, then it is invalid to declare a function + * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just + * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". */ #define __DO_TRACE(tp, proto, args) \ do { \ - void **it_func; \ + struct tracepoint_func *it_func_ptr; \ + void *it_func; \ + void *__data; \ \ rcu_read_lock_sched_notrace(); \ - it_func = rcu_dereference_sched((tp)->funcs); \ - if (it_func) { \ + it_func_ptr = rcu_dereference_sched((tp)->funcs); \ + if (it_func_ptr) { \ do { \ - ((void(*)(proto))(*it_func))(args); \ - } while (*(++it_func)); \ + it_func = (it_func_ptr)->func; \ + __data = (it_func_ptr)->data; \ + ((void(*)(proto))(it_func))(args); \ + } while ((++it_func_ptr)->func); \ } \ rcu_read_unlock_sched_notrace(); \ } while (0) @@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. 
*/ -#define DECLARE_TRACE(name, proto, args) \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ extern struct tracepoint __tracepoint_##name; \ static inline void trace_##name(proto) \ { \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ - TP_PROTO(proto), TP_ARGS(args)); \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args)); \ + } \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), void *data) \ + { \ + return tracepoint_probe_register(#name, (void *)probe, \ + data); \ } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), void *data) \ { \ - return tracepoint_probe_register(#name, (void *)probe); \ + return tracepoint_probe_unregister(#name, (void *)probe, \ + data); \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline void \ + check_trace_callback_type_##name(void (*cb)(data_proto)) \ { \ - return tracepoint_probe_unregister(#name, (void *)probe);\ } - #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ __attribute__((section("__tracepoints_strings"))) = #name; \ @@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, EXPORT_SYMBOL(__tracepoint_##name) #else /* !CONFIG_TRACEPOINTS */ -#define DECLARE_TRACE(name, proto, args) \ - static inline void _do_trace_##name(struct tracepoint *tp, proto) \ - { } \ +#define __DECLARE_TRACE(name, proto, args, data_proto, data_args) \ static inline void trace_##name(proto) \ { } \ - static inline int register_trace_##name(void (*probe)(proto)) \ + static inline int \ + register_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ } \ - static inline int unregister_trace_##name(void (*probe)(proto)) \ + static inline int \ + unregister_trace_##name(void (*probe)(data_proto), \ + void *data) \ { \ return -ENOSYS; \ + } \ + static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \ + { \ } #define DEFINE_TRACE_FN(name, reg, unreg) @@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, #define EXPORT_TRACEPOINT_SYMBOL(name) #endif /* CONFIG_TRACEPOINTS */ + +/* + * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype + * (void). "void" is a special value in a function prototype and can + * not be combined with other arguments. Since the DECLARE_TRACE() + * macro adds a data element at the beginning of the prototype, + * we need a way to differentiate "(void *data, proto)" from + * "(void *data, void)". The second prototype is invalid. + * + * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype + * and "void *__data" as the callback prototype. + * + * DECLARE_TRACE() passes "proto" as the tracepoint protoype and + * "void *__data, proto" as the callback prototype. 
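[Editor's sketch, not part of the patch] To illustrate the data-passing probe interface generated by the macros above: the tracepoint name example_event, its int argument, and the counter cookie are all hypothetical, but the register/unregister/probe shapes follow directly from __DECLARE_TRACE.

/* Assume elsewhere: DECLARE_TRACE(example_event, TP_PROTO(int val), TP_ARGS(val)); */
static long example_hits;

/* The probe now receives the registration cookie as its leading argument. */
static void example_probe(void *data, int val)
{
        long *hits = data;              /* cookie supplied at registration */

        if (val)
                (*hits)++;
}

static int example_register(void)
{
        return register_trace_example_event(example_probe, &example_hits);
}

static void example_unregister(void)
{
        unregister_trace_example_event(example_probe, &example_hits);
}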
+ */ +#define DECLARE_TRACE_NOARGS(name) \ + __DECLARE_TRACE(name, void, , void *__data, __data) + +#define DECLARE_TRACE(name, proto, args) \ + __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ + PARAMS(void *__data, proto), \ + PARAMS(__data, args)) + #endif /* DECLARE_TRACE */ #ifndef TRACE_EVENT diff --git a/include/linux/uinput.h b/include/linux/uinput.h index 15ddd4483b09..60c81da77f0f 100644 --- a/include/linux/uinput.h +++ b/include/linux/uinput.h @@ -166,11 +166,11 @@ struct uinput_ff_erase { struct uinput_user_dev { char name[UINPUT_MAX_NAME_SIZE]; struct input_id id; - int ff_effects_max; - int absmax[ABS_MAX + 1]; - int absmin[ABS_MAX + 1]; - int absfuzz[ABS_MAX + 1]; - int absflat[ABS_MAX + 1]; + int ff_effects_max; + int absmax[ABS_CNT]; + int absmin[ABS_CNT]; + int absfuzz[ABS_CNT]; + int absflat[ABS_CNT]; }; #endif /* __UINPUT_H_ */ diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 2389f93a28b5..92f1d99f0f17 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -105,6 +105,22 @@ struct uac_as_header_descriptor_v2 { __u8 iChannelNames; } __attribute__((packed)); +/* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */ + +struct uac2_iso_endpoint_descriptor { + __u8 bLength; /* in bytes: 8 */ + __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ + __u8 bDescriptorSubtype; /* EP_GENERAL */ + __u8 bmAttributes; + __u8 bmControls; + __u8 bLockDelayUnits; + __le16 wLockDelay; +} __attribute__((packed)); + +#define UAC2_CONTROL_PITCH (3 << 0) +#define UAC2_CONTROL_DATA_OVERRUN (3 << 2) +#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) + /* 6.1 Interrupt Data Message */ #define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) diff --git a/include/linux/uuid.h b/include/linux/uuid.h new file mode 100644 index 000000000000..5b7efbfcee4e --- /dev/null +++ b/include/linux/uuid.h @@ -0,0 +1,70 @@ +/* + * UUID/GUID definition + * + * Copyright (C) 2010, Intel Corp. + * Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_UUID_H_ +#define _LINUX_UUID_H_ + +#include +#include + +typedef struct { + __u8 b[16]; +} uuid_le; + +typedef struct { + __u8 b[16]; +} uuid_be; + +#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_le) \ +{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ +((uuid_be) \ +{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ + ((b) >> 8) & 0xff, (b) & 0xff, \ + ((c) >> 8) & 0xff, (c) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define NULL_UUID_LE \ + UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00) + +#define NULL_UUID_BE \ + UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00) + +static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) +{ + return memcmp(&u1, &u2, sizeof(uuid_le)); +} + +static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) +{ + return memcmp(&u1, &u2, sizeof(uuid_be)); +} + +extern void uuid_le_gen(uuid_le *u); +extern void uuid_be_gen(uuid_be *u); + +#endif diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 6cf44866cecd..726cc3536409 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h @@ -39,7 +39,7 @@ extern int net_cls_subsys_id; static inline u32 task_cls_classid(struct task_struct *p) { int id; - u32 classid; + u32 classid = 0; if (in_interrupt()) return 0; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6173c619913a..4b860116e096 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -876,7 +876,7 @@ struct sctp_transport { /* Reference counting. */ atomic_t refcnt; - int dead:1, + __u32 dead:1, /* RTO-Pending : A flag used to track if one of the DATA * chunks sent to this address is currently being * used to compute a RTT. If this flag is 0, diff --git a/include/net/sock.h b/include/net/sock.h index d2a71b04a5ae..731150d52799 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1026,15 +1026,23 @@ extern void release_sock(struct sock *sk); SINGLE_DEPTH_NESTING) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) -static inline void lock_sock_bh(struct sock *sk) +extern bool lock_sock_fast(struct sock *sk); +/** + * unlock_sock_fast - complement of lock_sock_fast + * @sk: socket + * @slow: slow mode + * + * fast unlock socket for user context. + * If slow mode is on, we call regular release_sock() + */ +static inline void unlock_sock_fast(struct sock *sk, bool slow) { - spin_lock_bh(&sk->sk_lock.slock); + if (slow) + release_sock(sk); + else + spin_unlock_bh(&sk->sk_lock.slock); } -static inline void unlock_sock_bh(struct sock *sk) -{ - spin_unlock_bh(&sk->sk_lock.slock); -} extern struct sock *sk_alloc(struct net *net, int family, gfp_t priority, @@ -1516,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); -static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) -{ - /* Cast skb->rcvbuf to unsigned... 
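[Editor's sketch, not part of the patch] A short illustration of the lock_sock_fast()/unlock_sock_fast() pair declared in the sock.h hunk above; the surrounding function is hypothetical. lock_sock_fast() reports whether it had to fall back to the full socket lock, and that flag must be handed back so the matching release path is taken.

static void example_with_sock_locked(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);

        /* ... short, non-sleeping critical section ... */

        unlock_sock_fast(sk, slow);     /* release_sock() only if slow mode was taken */
}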
It's pointless, but reduces - number of warnings when compiling with -W --ANK - */ - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= - (unsigned)sk->sk_rcvbuf) - return -ENOMEM; - skb_set_owner_r(skb, sk); - skb_queue_tail(&sk->sk_error_queue, skb); - if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk, skb->len); - return 0; -} +extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); /* * Recover an error report and clear atomically diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 2aa6aa3e8f61..f5b1ba90e952 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -353,7 +353,7 @@ TRACE_EVENT(ext4_discard_blocks, jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count) ); -TRACE_EVENT(ext4_mb_new_inode_pa, +DECLARE_EVENT_CLASS(ext4__mb_new_pa, TP_PROTO(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa), @@ -381,32 +381,20 @@ TRACE_EVENT(ext4_mb_new_inode_pa, __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) ); -TRACE_EVENT(ext4_mb_new_group_pa, +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, + TP_PROTO(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa), - TP_ARGS(ac, pa), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( __u64, pa_pstart ) - __field( __u32, pa_len ) - __field( __u64, pa_lstart ) + TP_ARGS(ac, pa) +); - ), +DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa, - TP_fast_assign( - __entry->dev = ac->ac_sb->s_dev; - __entry->ino = ac->ac_inode->i_ino; - __entry->pa_pstart = pa->pa_pstart; - __entry->pa_len = pa->pa_len; - __entry->pa_lstart = pa->pa_lstart; - ), + TP_PROTO(struct ext4_allocation_context *ac, + struct ext4_prealloc_space *pa), - TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart) + TP_ARGS(ac, pa) ); TRACE_EVENT(ext4_mb_release_inode_pa, @@ -618,9 +606,9 @@ TRACE_EVENT(ext4_free_blocks, ); TRACE_EVENT(ext4_sync_file, - TP_PROTO(struct file *file, struct dentry *dentry, int datasync), + TP_PROTO(struct file *file, int datasync), - TP_ARGS(file, dentry, datasync), + TP_ARGS(file, datasync), TP_STRUCT__entry( __field( dev_t, dev ) @@ -630,6 +618,8 @@ TRACE_EVENT(ext4_sync_file, ), TP_fast_assign( + struct dentry *dentry = file->f_path.dentry; + __entry->dev = dentry->d_inode->i_sb->s_dev; __entry->ino = dentry->d_inode->i_ino; __entry->datasync = datasync; @@ -790,7 +780,7 @@ TRACE_EVENT(ext4_mballoc_prealloc, __entry->result_len, __entry->result_logical) ); -TRACE_EVENT(ext4_mballoc_discard, +DECLARE_EVENT_CLASS(ext4__mballoc, TP_PROTO(struct ext4_allocation_context *ac), TP_ARGS(ac), @@ -819,33 +809,18 @@ TRACE_EVENT(ext4_mballoc_discard, __entry->result_len, __entry->result_logical) ); -TRACE_EVENT(ext4_mballoc_free, +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard, + TP_PROTO(struct ext4_allocation_context *ac), - TP_ARGS(ac), + TP_ARGS(ac) +); - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( __u32, result_logical ) - __field( int, result_start ) - __field( __u32, result_group ) - __field( int, result_len ) - ), +DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free, - TP_fast_assign( - __entry->dev = ac->ac_inode->i_sb->s_dev; - __entry->ino = ac->ac_inode->i_ino; - __entry->result_logical = ac->ac_b_ex.fe_logical; - __entry->result_start = ac->ac_b_ex.fe_start; - __entry->result_group = ac->ac_b_ex.fe_group; - __entry->result_len = ac->ac_b_ex.fe_len; - ), + 
TP_PROTO(struct ext4_allocation_context *ac), - TP_printk("dev %s inode %lu extent %u/%d/%u@%u ", - jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, - __entry->result_group, __entry->result_start, - __entry->result_len, __entry->result_logical) + TP_ARGS(ac) ); TRACE_EVENT(ext4_forget, @@ -974,6 +949,39 @@ TRACE_EVENT(ext4_da_release_space, __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) ); +DECLARE_EVENT_CLASS(ext4__bitmap_load, + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( __u32, group ) + + ), + + TP_fast_assign( + __entry->dev = sb->s_dev; + __entry->group = group; + ), + + TP_printk("dev %s group %u", + jbd2_dev_to_name(__entry->dev), __entry->group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); + +DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) +); #endif /* _TRACE_EXT4_H */ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 88c59c13ea7b..3d685d1f2a03 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -62,10 +62,13 @@ struct trace_entry ent; \ tstruct \ char __data[0]; \ - }; + }; \ + \ + static struct ftrace_event_class event_class_##name; + #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ - static struct ftrace_event_call \ + static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) event_##name #undef DEFINE_EVENT_PRINT @@ -147,7 +150,7 @@ * * entry = iter->ent; * - * if (entry->type != event_.id) { + * if (entry->type != event_->event.type) { * WARN_ON_ONCE(1); * return TRACE_TYPE_UNHANDLED; * } @@ -206,18 +209,22 @@ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace enum print_line_t \ -ftrace_raw_output_id_##call(int event_id, const char *name, \ - struct trace_iterator *iter, int flags) \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *trace_event) \ { \ + struct ftrace_event_call *event; \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##call *field; \ struct trace_entry *entry; \ struct trace_seq *p; \ int ret; \ \ + event = container_of(trace_event, struct ftrace_event_call, \ + event); \ + \ entry = iter->ent; \ \ - if (entry->type != event_id) { \ + if (entry->type != event->event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ @@ -226,7 +233,7 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \ \ p = &get_cpu_var(ftrace_event_seq); \ trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", name); \ + ret = trace_seq_printf(s, "%s: ", event->name); \ if (ret) \ ret = trace_seq_printf(s, print); \ put_cpu(); \ @@ -234,21 +241,16 @@ ftrace_raw_output_id_##call(int event_id, const char *name, \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ -} - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ -static notrace enum print_line_t \ -ftrace_raw_output_##name(struct trace_iterator *iter, int flags) \ -{ \ - return ftrace_raw_output_id_##template(event_##name.id, \ - #name, iter, flags); \ -} +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ static notrace enum 
print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ +ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ + struct trace_event *event) \ { \ struct trace_seq *s = &iter->seq; \ struct ftrace_raw_##template *field; \ @@ -258,7 +260,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ \ entry = iter->ent; \ \ - if (entry->type != event_##call.id) { \ + if (entry->type != event_##call.event.type) { \ WARN_ON_ONCE(1); \ return TRACE_TYPE_UNHANDLED; \ } \ @@ -275,7 +277,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ return TRACE_TYPE_PARTIAL_LINE; \ \ return TRACE_TYPE_HANDLED; \ -} +} \ +static struct trace_event_functions ftrace_event_type_funcs_##call = { \ + .trace = ftrace_raw_output_##call, \ +}; #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -381,80 +386,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#ifdef CONFIG_PERF_EVENTS - -/* - * Generate the functions needed for tracepoint perf_event support. - * - * NOTE: The insertion profile callback (ftrace_profile_) is defined later - * - * static int ftrace_profile_enable_(void) - * { - * return register_trace_(ftrace_profile_); - * } - * - * static void ftrace_profile_disable_(void) - * { - * unregister_trace_(ftrace_profile_); - * } - * - */ - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ - \ -static void perf_trace_##name(proto); \ - \ -static notrace int \ -perf_trace_enable_##name(struct ftrace_event_call *unused) \ -{ \ - return register_trace_##name(perf_trace_##name); \ -} \ - \ -static notrace void \ -perf_trace_disable_##name(struct ftrace_event_call *unused) \ -{ \ - unregister_trace_##name(perf_trace_##name); \ -} - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - -#endif /* CONFIG_PERF_EVENTS */ - /* * Stage 4 of the trace events. 
* * Override the macros in to include the following: * - * static void ftrace_event_(proto) - * { - * event_trace_printk(_RET_IP_, ": " ); - * } - * - * static int ftrace_reg_event_(struct ftrace_event_call *unused) - * { - * return register_trace_(ftrace_event_); - * } - * - * static void ftrace_unreg_event_(struct ftrace_event_call *unused) - * { - * unregister_trace_(ftrace_event_); - * } - * - * * For those macros defined with TRACE_EVENT: * * static struct ftrace_event_call event_; * - * static void ftrace_raw_event_(proto) + * static void ftrace_raw_event_(void *__data, proto) * { + * struct ftrace_event_call *event_call = __data; * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ring_buffer_event *event; * struct ftrace_raw_ *entry; <-- defined in stage 1 @@ -469,7 +412,7 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * __data_size = ftrace_get_offsets_(&__data_offsets, args); * * event = trace_current_buffer_lock_reserve(&buffer, - * event_.id, + * event_->event.type, * sizeof(*entry) + __data_size, * irq_flags, pc); * if (!event) @@ -484,43 +427,42 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \ * event, irq_flags, pc); * } * - * static int ftrace_raw_reg_event_(struct ftrace_event_call *unused) - * { - * return register_trace_(ftrace_raw_event_); - * } - * - * static void ftrace_unreg_event_(struct ftrace_event_call *unused) - * { - * unregister_trace_(ftrace_raw_event_); - * } - * * static struct trace_event ftrace_event_type_ = { * .trace = ftrace_raw_output_, <-- stage 2 * }; * * static const char print_fmt_[] = ; * + * static struct ftrace_event_class __used event_class_